{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "import numpy as np \n",
    "import tensorflow as tf \n",
    "import pandas as pd\n",
    "import pprint\n",
    "p = pprint.PrettyPrinter(indent=4)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 生成one-hot词向量 (Generate one-hot tag vectors)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Build a tag-name -> one-hot vector lookup from tag_list.csv.\n",
    "# CSV layout (per the parsing below): header row, then \"<1-based id>,<tag name>\".\n",
    "N_TAGS = 29  # number of distinct tags = one-hot dimension\n",
    "dict1 = {}\n",
    "with open(\"tag_list.csv\", \"r\", encoding=\"utf-8\") as f:  # read-only; the old \"r+\" write access was never used\n",
    "    for line in f.readlines()[1:]:  # skip header; 'line' avoids shadowing builtin all()\n",
    "        fields = line.split(',')\n",
    "        tag_vec = np.zeros(N_TAGS)\n",
    "        tag_vec[int(fields[0]) - 1] = 1\n",
    "        # rstrip('\\n') instead of [:-1]: the slice would chop a real character\n",
    "        # if the file's last line has no trailing newline\n",
    "        dict1[fields[1].rstrip('\\n')] = tag_vec"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "import pymysql as sql\n",
    "import os\n",
    "\n",
    "# Security: never hardcode DB credentials in a notebook — they end up in\n",
    "# version control and in shared outputs. Read them from the environment.\n",
    "db = sql.connect(host='xmu-maker.cn',\n",
    "                 user=os.environ.get('FILMS_DB_USER', 'root'),\n",
    "                 password=os.environ.get('FILMS_DB_PASSWORD', ''),\n",
    "                 port=3306)\n",
    "cursor = db.cursor()\n",
    "cursor.execute(\"use films\")\n",
    "\n",
    "sql_cmd = 'select * from movies'\n",
    "cursor.execute(sql_cmd)\n",
    "result = cursor.fetchall()  # tuple of rows from films.movies"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "21678\n"
     ]
    }
   ],
   "source": [
    "print(len(result))  # sanity check: number of movie rows fetched from the DB"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "# NOTE(review): this frame is loaded but never used again in this notebook —\n",
    "# the pipeline below works off `result` from the database instead; confirm\n",
    "# whether this cell can be removed.\n",
    "df = pd.read_csv(\"films.csv\")\n",
    "df = df.fillna(value='')  # avoid inplace=True; TODO: switch this to reading from the database"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Extract the useful columns from each DB row:\n",
    "# [movie_id, title, tag list, actor list, release year]\n",
    "list1 = []\n",
    "for each in result:\n",
    "    tmp = []\n",
    "    tmp.append(each[0])             # movie id\n",
    "    tmp.append(each[1])             # title\n",
    "    tmp.append(each[4].split(\"/\"))  # tags, '/'-separated in the DB\n",
    "    tmp.append(each[5].split(\"/\"))  # actors, '/'-separated in the DB\n",
    "    # Bug fix: the original only assigned `year` when each[6] was EMPTY, so\n",
    "    # every movie with a real date ended up with year == '' (visible in the\n",
    "    # saved output). Use the DB value when present, default \"2012\" otherwise.\n",
    "    year = each[6] if each[6] else \"2012\"\n",
    "    tmp.append(year.split(\" \")[0].split('/')[0])  # keep only the leading year token\n",
    "    list1.append(tmp)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "D:\\Anaconda\\envs\\tf\\lib\\site-packages\\numpy\\lib\\npyio.py:528: VisibleDeprecationWarning: Creating an ndarray from ragged nested sequences (which is a list-or-tuple of lists-or-tuples-or ndarrays with different lengths or shapes) is deprecated. If you meant to do this, you must specify 'dtype=object' when creating the ndarray.\n",
      "  arr = np.asanyarray(arr)\n"
     ]
    }
   ],
   "source": [
    "# Rows are ragged (tag/actor lists of differing lengths), so be explicit about\n",
    "# dtype=object instead of triggering NumPy's VisibleDeprecationWarning.\n",
    "np.save(\"films_list.npy\", np.array(list1, dtype=object))  # persist intermediate result"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "21678 [[1 '门锁' list(['悬疑', '犯罪']) list(['白百何', '白客', '范丞丞']) '']\n",
      " [2 '扬名立万' list(['悬疑', '喜剧', '剧情']) list(['尹正', '邓家佳', '喻恩泰']) '']\n",
      " [3 '不速来客' list(['悬疑', '喜剧']) list(['范伟', '窦骁', '张颂文']) '']\n",
      " ...\n",
      " [21681 '集结号' list(['战争']) list(['张涵予', '邓超', '袁文康']) '']\n",
      " [21682 '哈利·波特与魔法石' list(['奇幻', '冒险'])\n",
      "  list(['丹尼尔·雷德克里夫', '鲁伯特·格林特', '艾玛·沃特森']) '']\n",
      " [21683 '地道战' list(['战争']) list(['朱龙广', '王炳彧', '张勇手']) '']]\n"
     ]
    }
   ],
   "source": [
    "# Reload the saved list. The original lines carried stray leading spaces,\n",
    "# which raise IndentationError at top level in plain Python; removed.\n",
    "new_list = np.load(\"films_list.npy\", allow_pickle=True)  # pass a real bool, not the string \"TRUE\"\n",
    "print(len(new_list), new_list[:3])  # preview a few rows instead of dumping all ~21k"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Turn each movie's tag-name list into one averaged one-hot vector (in place).\n",
    "# (The unused `tags_vecc` accumulator from the original was removed.)\n",
    "for each in new_list:\n",
    "    vec = np.zeros(29)\n",
    "    for tag in each[2]:\n",
    "        if tag in dict1:  # tags missing from the lookup contribute nothing\n",
    "            vec = vec + dict1[tag]\n",
    "    # max(..., 1) guards against a movie with an empty tag list (ZeroDivisionError)\n",
    "    each[2] = vec / max(len(each[2]), 1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "21678 [2 '扬名立万'\n",
      " array([0., 1., 0., 1., 0., 0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 0., 0.,\n",
      "        0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.])\n",
      " list(['尹正', '邓家佳', '喻恩泰']) '']\n"
     ]
    }
   ],
   "source": [
    "np.save(\"tags_vec.npy\", new_list) # save the tag-vectorised movie list to disk\n",
    "print(len(new_list),new_list[1])  # spot-check one transformed row"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 对演员进行处理 (Process the actors)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "32024 ['白百何', '白客', '范丞丞', '尹正', '邓家佳', '喻恩泰', '范伟', '窦骁', '张颂文', '孔晓振']\n"
     ]
    }
   ],
   "source": [
    "# Count how many films each actor appears in.\n",
    "actors = {}        # actor name -> appearance count\n",
    "dup_actors = []    # every appearance incl. duplicates; used to adapt the vocabulary\n",
    "for each in new_list:\n",
    "    for actor in each[3]:\n",
    "        dup_actors.append(actor)\n",
    "        if len(actor)==0 or actor[0] == '?':  # skip empty / placeholder names\n",
    "            continue\n",
    "        actors[actor] = actors.get(actor, 0) + 1\n",
    "\n",
    "# Keep every counted actor (a count >= 5 cut-off was tried and disabled).\n",
    "# dict preserves insertion order, so this matches the original loop order.\n",
    "main_actor = list(actors)\n",
    "print(len(main_actor), main_actor[:10]) # 32024 unique names"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [],
   "source": [
    "tensor_actors =  tf.convert_to_tensor(main_actor)   # unique actor names\n",
    "tensor_dup_actors =  tf.convert_to_tensor(dup_actors) # every appearance, duplicates included"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [],
   "source": [
    "actors_vocabulary = tf.keras.layers.StringLookup(mask_token=None)\n",
    "actors_vocabulary.adapt(tensor_dup_actors) # build the name -> integer-id vocabulary from all appearances"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {},
   "outputs": [],
   "source": [
    "embeding_dim = 32  # actor embedding dimensionality (misspelling kept: downstream cells reference this name)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Map actor name -> 32-dim vector: StringLookup id feeding an Embedding layer.\n",
    "# NOTE(review): the Embedding layer is never trained here, so these vectors\n",
    "# are just its random initial weights — confirm this is intended.\n",
    "actors_model = tf.keras.Sequential([\n",
    "    actors_vocabulary,\n",
    "    tf.keras.layers.Embedding(actors_vocabulary.vocabulary_size(), embeding_dim)\n",
    "])\n",
    "actors_embeding = actors_model.predict(tensor_actors) # one embedding row per unique actor"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[[-0.036537    0.00367962 -0.01700809 ... -0.0297085  -0.04210622\n",
      "   0.02363512]\n",
      " [-0.03938555 -0.00746332 -0.03001251 ... -0.00765115 -0.03406441\n",
      "   0.0192184 ]\n",
      " [-0.02873847  0.00160531 -0.03261195 ...  0.00424068  0.0182705\n",
      "  -0.02102694]\n",
      " ...\n",
      " [-0.00430353 -0.01745879 -0.02278423 ... -0.04445572 -0.00937896\n",
      "   0.02837977]\n",
      " [-0.02980988  0.01419402  0.03032413 ... -0.02374318 -0.02954924\n",
      "  -0.03300748]\n",
      " [-0.04169844 -0.04557239  0.00960777 ...  0.03698986  0.02241694\n",
      "  -0.00330427]] 32024\n"
     ]
    }
   ],
   "source": [
    "print(actors_embeding, len(main_actor))\n",
    "# NOTE(review): this saves only row 1 (a single actor's vector), not the whole\n",
    "# matrix — looks suspicious; confirm whether `actors_embeding` was intended.\n",
    "np.save('actors.npy', actors_embeding[1])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[2 '扬名立万'\n",
      " array([0., 1., 0., 1., 0., 0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 0., 0.,\n",
      "        0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.])\n",
      " list(['尹正', '邓家佳', '喻恩泰']) '']\n"
     ]
    }
   ],
   "source": [
    "new_list = np.load(\"tags_vec.npy\", allow_pickle=True)  # reload; pass a bool, not the string \"TRUE\"\n",
    "print(new_list[1])  # spot-check one row"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Replace each movie's actor-name list with the SUM of its actors' embeddings,\n",
    "# and normalise the release year.\n",
    "# Build the name -> row-index map once: list.index() inside the loop was\n",
    "# O(len(main_actor)) per lookup, i.e. quadratic over ~32k actors x ~21k films.\n",
    "actor_index = {name: i for i, name in enumerate(main_actor)}\n",
    "for i in range(len(new_list)):\n",
    "    each = new_list[i]\n",
    "    vec = np.zeros(32)  # embeding_dim\n",
    "    for actor in each[3]:\n",
    "        if len(actor)==0 or actor[0] == '?':  # same filter used when counting actors\n",
    "            continue\n",
    "        vec += actors_embeding[actor_index[actor]]\n",
    "    new_list[i][3] = vec\n",
    "    try:\n",
    "        year = int(new_list[i][4])\n",
    "    except ValueError:\n",
    "        year = 2012  # fallback for unparseable / missing years\n",
    "    new_list[i][4] = (2021 - year) / 30 # 30 ~= maximum age in the data set"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "21678 [1 '门锁'\n",
      " array([0., 0., 0., 0., 0., 0., 0., 0., 1., 1., 0., 0., 0., 0., 0., 0., 0.,\n",
      "        0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.])\n",
      " array([-0.10466102, -0.00217839, -0.07963256, -0.01440912,  0.063554  ,\n",
      "        -0.02231003, -0.04113979, -0.03688204, -0.10199331,  0.02227357,\n",
      "         0.09790229, -0.02171341,  0.04380156, -0.03569859,  0.01126916,\n",
      "        -0.02153091,  0.0432877 ,  0.06055772, -0.12565631, -0.03619149,\n",
      "        -0.03011446,  0.0901689 ,  0.0061611 ,  0.01734333, -0.0687953 ,\n",
      "         0.02951275, -0.03415914,  0.00194225, -0.01974169, -0.03311897,\n",
      "        -0.05790013,  0.02182657])\n",
      " 0.3]\n"
     ]
    }
   ],
   "source": [
    "np.save(\"films_tags_actors_vec.npy\", new_list)  # persist movies with tag + actor vectors\n",
    "print(len(new_list), new_list[0])  # spot-check the first row"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 生成电影的特征向量 (Build the movie feature vectors)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 25,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[2 '扬名立万'\n",
      " array([0., 1., 0., 1., 0., 0., 0., 0., 1., 0., 0., 0., 0., 0., 0., 0., 0.,\n",
      "        0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.])\n",
      " array([ 0.00849329,  0.06398953, -0.07839381, -0.03686136, -0.07288034,\n",
      "         0.06806786,  0.01483729, -0.012692  ,  0.01741196, -0.04611109,\n",
      "         0.00814718, -0.01675093,  0.01332685,  0.03829227,  0.00168256,\n",
      "        -0.03220497, -0.00297867,  0.04401799,  0.01024236, -0.01648495,\n",
      "         0.03094299, -0.03344157,  0.0252257 , -0.00547996, -0.0447191 ,\n",
      "         0.04911379,  0.0203924 , -0.05134765,  0.03793834,  0.01464812,\n",
      "        -0.00284256, -0.09940291])\n",
      " 0.3]\n"
     ]
    }
   ],
   "source": [
    "list_vec = np.load(\"films_tags_actors_vec.npy\", allow_pickle=True)  # reload; pass a bool, not the string \"TRUE\"\n",
    "print(list_vec[1])  # spot-check one row"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 40,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "21678 [ 0.          1.          0.          1.          0.          0.\n",
      "  0.          0.          1.          0.          0.          0.\n",
      "  0.          0.          0.          0.          0.          0.\n",
      "  0.          0.          0.          0.          0.          0.\n",
      "  0.          0.          0.          0.          0.          0.00849329\n",
      "  0.06398953 -0.07839381 -0.03686136 -0.07288034  0.06806786  0.01483729\n",
      " -0.012692    0.01741196 -0.04611109  0.00814718 -0.01675093  0.01332685\n",
      "  0.03829227  0.00168256 -0.03220497 -0.00297867  0.04401799  0.01024236\n",
      " -0.01648495  0.03094299 -0.03344157  0.0252257  -0.00547996 -0.0447191\n",
      "  0.04911379  0.0203924  -0.05134765  0.03793834  0.01464812 -0.00284256\n",
      " -0.09940291  0.3       ]\n"
     ]
    }
   ],
   "source": [
    "# Concatenate tag vector (29) + actor vector (32) + year scalar (1) into one\n",
    "# 62-dim feature vector per movie; keep the movie ids in a parallel list.\n",
    "# np.concatenate builds each vector in one pass instead of two np.append copies.\n",
    "films_vec_dict = []  # NOTE(review): actually a list; name kept because the save cell below uses it\n",
    "films_mid = []\n",
    "for row in list_vec:\n",
    "    feature = np.concatenate([row[2], row[3], [row[4]]])\n",
    "    films_mid.append(row[0])\n",
    "    films_vec_dict.append(feature)\n",
    "\n",
    "print(len(films_vec_dict), films_vec_dict[1])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 41,
   "metadata": {},
   "outputs": [],
   "source": [
    "np.save(\"films_vec.npy\", films_vec_dict)  # 62-dim feature vector per movie\n",
    "np.save(\"films_mid.npy\", films_mid)  # parallel list of movie ids"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# KNN算法 (KNN algorithm)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 43,
   "metadata": {},
   "outputs": [],
   "source": [
    "films_vec = np.load(\"films_vec.npy\", allow_pickle=True)  # feature matrix for KNN; pass a bool, not \"TRUE\""
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 47,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[ 0.          1.          0.          0.          0.          0.\n",
      "  0.          0.          1.          0.          0.          0.\n",
      "  0.          0.          0.          0.          0.          0.\n",
      "  0.          0.          0.          0.          0.          0.\n",
      "  0.          0.          0.          0.          0.          0.05022713\n",
      "  0.06806524  0.01969857  0.02338002 -0.10735967  0.00514672 -0.06145382\n",
      "  0.03081784  0.00318936  0.08318197 -0.02103283  0.05389309  0.0708149\n",
      " -0.03301393 -0.04621442 -0.03745029  0.00366843  0.00898413  0.07808842\n",
      " -0.04919504  0.0326986  -0.07615674  0.02870244  0.0013892   0.0335093\n",
      "  0.0061256  -0.02217126 -0.02557385 -0.05254648  0.07596426  0.02757077\n",
      " -0.00590638  0.3       ]\n"
     ]
    }
   ],
   "source": [
    "print(films_vec[4])  # spot-check one movie's 62-dim feature vector"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "interpreter": {
   "hash": "06b4f7c561f76ad499c5882876df75324df0d287869e55b80005d36e4919f42b"
  },
  "kernelspec": {
   "display_name": "Python 3.9.7 64-bit ('tf': conda)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.7"
  },
  "orig_nbformat": 4
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
