{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Train LFM model"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Model definition"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "from file_titles import *\n",
    "\n",
    "import signal, os\n",
    "import pickle\n",
    "import numpy as np\n",
    "import scipy.io as sio\n",
    "import scipy.sparse as ss\n",
    "from numpy.random import random  \n",
    "from collections import defaultdict\n",
    "import matplotlib.pyplot as plt\n",
    "import time"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "class MyLFM():\n",
    "    def __init__(self, **init_para):\n",
    "        self.userEventScores = sio.mmread(userEventScores_file_title).todense()\n",
    "        self.n_users, self.n_items = self.userEventScores.shape\n",
    "        self.init_SVD(**init_para)\n",
    "    \n",
    "    def init_SVD(self, K=20, alpha = 0.01, lambda_default = 1, lambda_u = None, lambda_i = None, lambda_bu = None, lambda_bi = None):\n",
    "        #初始化模型参数（for 基于模型的协同过滤SVD_CF）\n",
    "        self.K = K  \n",
    "\n",
    "        #初始化正则参数\n",
    "        if lambda_u == None:\n",
    "            lambda_u = lambda_default\n",
    "        if lambda_i == None:\n",
    "            lambda_i = lambda_default\n",
    "        if lambda_bu == None:\n",
    "            lambda_bu = lambda_default\n",
    "        if lambda_bi == None:\n",
    "            lambda_bi = lambda_default\n",
    "\n",
    "        self.lambda_u = lambda_u\n",
    "        self.lambda_i = lambda_i\n",
    "        self.lambda_bu = lambda_bu\n",
    "        self.lambda_bi = lambda_bi\n",
    "\n",
    "        #初始化学习率\n",
    "        self.alpha = alpha\n",
    "\n",
    "        #init parameters\n",
    "        #bias\n",
    "        self.mu = 0\n",
    "        self.bu = np.zeros(self.n_users) \n",
    "        self.bi = np.zeros(self.n_items)  \n",
    "\n",
    "\n",
    "        #the small matrix\n",
    "        self.P = random((self.n_users,self.K))/10*(np.sqrt(self.K))\n",
    "        self.Q = random((self.n_items,self.K))/10*(np.sqrt(self.K))  \n",
    "        \n",
    "        self._loss_info = []\n",
    "        self.loop_cnt = 1\n",
    "        \n",
    "    def train_SVD(self):\n",
    "        #训练SVD模型（for 基于模型的协同过滤SVD_CF）\n",
    "        #gamma：为学习率\n",
    "        #Lambda：正则参数\n",
    "\n",
    "        nrows, ncols = self.userEventScores.shape\n",
    "        valid_index_list = []\n",
    "\n",
    "        total = 0\n",
    "        for i in range(nrows):\n",
    "            for j in range(ncols):\n",
    "                if self.userEventScores[i, j]:\n",
    "                    valid_index_list.append((i, j))\n",
    "                    total += self.userEventScores[i, j]\n",
    "\n",
    "        self.mu = total/len(valid_index_list)\n",
    "\n",
    "        score = np.zeros((self.n_users, self.n_items))\n",
    "        for i, j in valid_index_list:\n",
    "            score[i, j] = self.userEventScores[i, j] - self.mu\n",
    "\n",
    "        e = np.zeros((self.n_users, self.n_items))\n",
    "\n",
    "        tmp_bu = np.zeros(self.n_users) \n",
    "        tmp_bi = np.zeros(self.n_items)  \n",
    "        tmp_P = random((self.n_users,self.K))/10*(np.sqrt(self.K))\n",
    "        tmp_Q = random((self.n_items,self.K))/10*(np.sqrt(self.K))\n",
    "        \n",
    "        self._loss_info = [1000000000000]\n",
    "        _loss_info = self._loss_info\n",
    "\n",
    "        for i, j in valid_index_list:\n",
    "            e[i, j] = score[i, j] - (self.bu[i] + self.bi[j] + np.dot(self.P[i,:],self.Q[j,:].T))\n",
    "\n",
    "        while True:\n",
    "            alpha = self.alpha / self.loop_cnt\n",
    "            for i in range(self.n_users):\n",
    "                tmp_bu[i] = self.bu[i] + alpha * ( e[i, :].sum() - self.lambda_bu * self.bu[i])\n",
    "                for k in range(self.K):\n",
    "                    tmp_P[i, k] = self.P[i, k] + alpha * (np.dot(e[i, :], self.Q[:, k])- self.lambda_u * self.P[i, k])\n",
    "\n",
    "            for j in range(self.n_items):\n",
    "                tmp_bi[j] = self.bi[j] + alpha * ( e[:, j].sum() - self.lambda_bi * self.bi[j])\n",
    "                for k in range(self.K):\n",
    "                    tmp_Q[j, k] = self.Q[j, k] + alpha * (np.dot(e[:, j].T, self.P[:, k]) - self.lambda_i * self.Q[j, k])\n",
    "\n",
    "            s = signal.signal(signal.SIGINT, signal.SIG_IGN)\n",
    "            tmp = self.bu \n",
    "            self.bu = tmp_bu\n",
    "            tmp_bu = tmp\n",
    "            \n",
    "            tmp = self.bi \n",
    "            self.bi = tmp_bi\n",
    "            tmp_bi = tmp\n",
    "            \n",
    "            tmp = self.P \n",
    "            self.P = tmp_P\n",
    "            tmp_P = tmp\n",
    "            \n",
    "            tmp = self.Q \n",
    "            self.Q = tmp_Q\n",
    "            tmp_Q = tmp\n",
    "            signal.signal(signal.SIGINT, s)\n",
    "            \n",
    "            for i, j in valid_index_list:\n",
    "                e[i, j] = score[i, j] - (self.bu[i] + self.bi[j] + np.dot(self.P[i,:],self.Q[j,:].T))\n",
    "            total_loss = np.square(e).sum() + self.lambda_u * np.square(self.P).sum() + self.lambda_i * np.square(self.Q).sum() + \\\n",
    "                                            self.lambda_bu * np.square(self.bu).sum() + self.lambda_bi * np.square(self.bi).sum()\n",
    "\n",
    "            total_loss /= 2\n",
    "            #if total_loss > _loss_info[-1]:\n",
    "            self.loop_cnt += 1\n",
    "            _loss_info.append(total_loss)\n",
    "\n",
    "            print('\\r', total_loss, end = '')\n",
    "            \n",
    "            if len(_loss_info) > 10:\n",
    "                if _loss_info[-10] - min(_loss_info[-10:]) < 0.0001:\n",
    "                    break;\n",
    "\n",
    "            \n",
    "        print('\\n')\n",
    "    \n",
    "    def pred_SVD(self, uid, i_id):\n",
    "        #根据当前参数，预测用户uid对Item（i_id）的打分        \n",
    "        ans=self.mu + self.bi[i_id] + self.bu[uid] + np.dot(self.P[uid,:],self.Q[:,i_id])  \n",
    "\n",
    "        #将打分范围控制在0-1之间\n",
    "        if ans>1:  \n",
    "            return 1  \n",
    "        elif ans<0:  \n",
    "            return 0\n",
    "        return ans\n",
    "    \n",
    "    def store_model_data(self):\n",
    "        stored_data = {\n",
    "            'mu': self.mu,\n",
    "            'bu': self.bu,\n",
    "            'bi': self.bi,\n",
    "            'P' : self.P,\n",
    "            'Q' : self.Q,\n",
    "            \n",
    "            'lambda_u': self.lambda_u,\n",
    "            'lambda_i': self.lambda_i,\n",
    "            'lambda_bu': self.lambda_bu,\n",
    "            'lambda_bi': self.lambda_bi,\n",
    "            \n",
    "            'loop_cnt' : self.loop_cnt,\n",
    "            '_loss_info' : self._loss_info,\n",
    "            'alpha': self.alpha\n",
    "        }\n",
    "        np.savez(LFM_model_data, **stored_data)\n",
    "#         with open(LFM_model_data, 'wb') as f:\n",
    "#             pkl = pickle.Pickler(f)\n",
    "#             # dump main data\n",
    "#             pkl.dump(self.mu)\n",
    "#             pkl.dump(self.bu)\n",
    "#             pkl.dump(self.bi)\n",
    "#             pkl.dump(self.P)\n",
    "#             pkl.dump(self.Q)\n",
    "\n",
    "#             # dump train relationed\n",
    "#             pkl.dump(self.alpha)\n",
    "#             pkl.dump(self.loop_cnt)\n",
    "#             pkl.dump(self._loss_info)\n",
    "        \n",
    "    def load_model_data(self):\n",
    "        stored_data = np.load(LFM_model_data)\n",
    "        self.mu = stored_data['mu']\n",
    "        self.bu = stored_data['bu']\n",
    "        self.bi = stored_data['bi']\n",
    "        self.P = stored_data['P']\n",
    "        self.Q = stored_data['Q']\n",
    "        \n",
    "        self.lambda_u = stored_data['lambda_u']\n",
    "        self.lambda_i = stored_data['lambda_i']\n",
    "        self.lambda_bu = stored_data['lambda_bu']\n",
    "        self.lambda_bi = stored_data['lambda_bi']\n",
    "        \n",
    "        self.alpha = stored_data['alpha']\n",
    "        self.loop_cnt = stored_data['loop_cnt']\n",
    "        self._loss_info = stored_data['_loss_info']\n",
    "#         with open(LFM_model_data, 'rb') as f:\n",
    "#             pkl = pickle.Unpickler(f)\n",
    "#             # load train relationed\n",
    "#             self._loss_info = pkl.load()\n",
    "#             self.loop_cnt = pkl.load()\n",
    "#             self.alpha = pkl.load()\n",
    "\n",
    "#             # load main data\n",
    "#             self.Q = pkl.load()\n",
    "#             self.P = pkl.load()\n",
    "#             self.bi = pkl.load()\n",
    "#             self.bu = pkl.load()\n",
    "#             self.mu = pkl.load()\n",
    "    \n",
    "    def loss(self):\n",
    "        e = np.zeros((self.n_users, self.n_items))\n",
    "        tmp = time.time()\n",
    "        for i in range(self.n_users):\n",
    "            for j in range(self.n_items):\n",
    "                if self.userEventScores[i, j]:\n",
    "                    e[i, j] = self.userEventScores[i, j] - (self.mu + self.bu[i] + self.bi[j] + np.dot(self.P[i,:],self.Q[j,:].T))\n",
    "        print('loop costL:{}'.format(time.time() - tmp))\n",
    "        \n",
    "        tmp = time.time()\n",
    "        total_loss = np.square(e).sum() + self.lambda_u * np.square(self.P).sum() + self.lambda_i * np.square(self.Q).sum() + \\\n",
    "                                            self.lambda_bu * np.square(self.bu).sum() + self.lambda_bi * np.square(self.bi).sum()\n",
    "        total_loss /= 2\n",
    "        print('sum costL:{}'.format(time.time() - tmp))\n",
    "        \n",
    "        return total_loss"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Initialize the model"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "load model data from file ...\n"
     ]
    }
   ],
   "source": [
    "# Build the model; warm-start from previously saved parameters if present.\n",
    "lfm = MyLFM()\n",
    "have_saved_model = os.path.isfile(LFM_model_data)\n",
    "if have_saved_model:\n",
    "    print('load model data from file ...')\n",
    "    lfm.load_model_data()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Train the model"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      " 848.1167533103653\n",
      "\n"
     ]
    }
   ],
   "source": [
    "# Run batch gradient-descent training; prints the loss each iteration.\n",
    "lfm.train_SVD()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Check the loss and save the model"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "loop costL:38.11781549453735\n",
      "sum costL:0.19846677780151367\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "848.1167531904044"
      ]
     },
     "execution_count": 5,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Recompute the full regularized loss as a sanity check on the trained model.\n",
    "lfm.loss()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Persist the trained parameters so the next run can warm-start from them.\n",
    "lfm.store_model_data()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.5"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
