{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 506,
   "metadata": {
    "execution": {
     "iopub.execute_input": "2024-05-24T11:17:24.546143Z",
     "iopub.status.busy": "2024-05-24T11:17:24.545473Z",
     "iopub.status.idle": "2024-05-24T11:17:25.882798Z",
     "shell.execute_reply": "2024-05-24T11:17:25.881719Z",
     "shell.execute_reply.started": "2024-05-24T11:17:24.546097Z"
    },
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "import pandas as pd\n",
    "import numpy as np\n",
    "from sklearn.metrics.pairwise import linear_kernel\n",
    "from collections import Counter\n",
    "from spellchecker import SpellChecker\n",
    "import ast\n",
    "import re"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 507,
   "metadata": {},
   "outputs": [],
   "source": [
    "merged_data = pd.read_csv(\"updata/merge_data.csv\", na_filter=False)"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "对输入的语句的正确性处理"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 508,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Indie Films\n"
     ]
    }
   ],
   "source": [
     "# The movie title to query (user input).\n",
     "movie_title = \"Indie Films\"\n",
     "# Strip every character that is not a letter, digit, or whitespace.\n",
     "new_movie_title = re.sub(r'[^a-zA-Z0-9\\s]', '', movie_title)\n",
     "\n",
     "print(new_movie_title)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 509,
   "metadata": {},
   "outputs": [],
   "source": [
    "# spell = SpellChecker()\n",
    "# # 分词并尝试纠正每个单词，如果无法纠正就使用原单词\n",
    "# corrected_words = []\n",
    "# for word in new_movie_title.split():\n",
    "#     try:\n",
    "#         # 尝试纠正单词\n",
    "#         corrected_word = spell.correction(word)\n",
    "#         # 确保添加到列表的是字符串\n",
    "#         corrected_words.append(corrected_word if corrected_word is not None else word)\n",
    "#     except Exception as e:\n",
    "#         # 如果发生错误，使用原单词并记录错误信息\n",
    "#         corrected_words.append(word)\n",
    "#         print(f\"Error correcting word '{word}': {str(e)}\")\n",
    "\n",
    "# # 将纠正后的单词组合回一句完整的文本\n",
    "# new_movie_title = ' '.join(corrected_words)\n",
    "\n",
    "# print(new_movie_title)"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "文本相似度处理"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 510,
   "metadata": {},
   "outputs": [],
   "source": [
    "###首先根据title和overview计算"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 511,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(45435, 81999)"
      ]
     },
     "execution_count": 511,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "from sklearn.feature_extraction.text import TfidfVectorizer\n",
    "\n",
    "# 定义一个移除所有english stop words如'the', 'a'的转换器\n",
    "tfidf = TfidfVectorizer(stop_words='english')\n",
    "\n",
    "# 将标题和概述用空格隔开并存储到一个数组（列表）中\n",
    "title_list = ((merged_data['title']+' ')*5+ ' '+ (merged_data['comb'])+' '+(merged_data['overview']+' ')).tolist()\n",
    "#title_list = ((merged_data['title']+' ')*2+ ' '+ (merged_data['comb'])).tolist()\n",
    "\n",
    "title_list.append(new_movie_title)\n",
    "\n",
    "# 构建 TF-IDF 矩阵\n",
    "tfidf = TfidfVectorizer(stop_words='english')\n",
    "tfidf_matrix = tfidf.fit_transform(title_list)\n",
    "\n",
    "# 输出矩阵形状\n",
    "tfidf_matrix.shape"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 512,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 获取词汇表\n",
    "feature_names = tfidf.get_feature_names_out()\n",
    "\n",
    "# 获取新电影的TF-IDF向量\n",
    "new_movie_vector = tfidf_matrix[-1]\n",
    "\n",
    "# 将TF-IDF向量转换为词素集合\n",
    "new_movie_tokens = set(feature_names[i] for i in new_movie_vector.indices)\n",
    "\n",
    "# 获取所有其他电影的词素集合\n",
    "other_movie_tokens = set()\n",
    "for doc in tfidf_matrix[:-1]:\n",
    "    other_movie_tokens.update(feature_names[i] for i in doc.indices)\n",
    "\n",
    "# 计算新电影词素集合中在其他电影词素集合中出现的词素集合\n",
    "common_tokens = new_movie_tokens.intersection(other_movie_tokens)\n",
    "\n",
    "# # 输出新电影词素集合中在其他电影词素集合中出现的词素集合\n",
    "# print(\"新电影词素集合中在其他电影词素集合中出现的词素集合:\")\n",
    "# print(common_tokens)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "请点击[此处](https://ai.baidu.com/docs#/AIStudio_Project_Notebook/a38e5576)查看本环境基本用法.  <br>\n",
    "Please click [here ](https://ai.baidu.com/docs#/AIStudio_Project_Notebook/a38e5576) for more detailed instructions. "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 513,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 计算余弦相似度，仅计算新电影与其他电影的相似度\n",
    "cosine_sim_1 = linear_kernel(tfidf_matrix[-1:], tfidf_matrix[:-1])\n",
    "\n",
    "# 获取与新电影最相似的前20个电影\n",
    "sim_scores_1 = list(enumerate(cosine_sim_1[0]))\n",
    "# print(sim_scores_1[:5])\n",
    "# sim_scores = sorted(sim_scores_1, key=lambda x: x[1], reverse=True)[:20]\n",
    "\n",
    "# # 获取最相似电影的索引和相似度分数\n",
    "# movie_indices = [i[0] for i in sim_scores]\n",
    "# scores = [score[1] for score in sim_scores]\n",
    "\n",
    "# # 输出推荐结果\n",
    "# recommendations = merged_data['title'].iloc[movie_indices]\n",
    "# for title, score in zip(recommendations, scores):\n",
    "#     print(f\"Title: {title}, Similarity Score: {score}\")\n",
    "\n",
    "# #将结果保存到文件\n",
    "# output_df = pd.DataFrame({\n",
    "#     'movieId': merged_data.iloc[movie_indices]['id'],\n",
    "#     'title': merged_data.iloc[movie_indices]['title'],\n",
    "#     'similarity_score': scores\n",
    "# })\n",
    "# output_df.to_csv('result/similar_movies.csv', index=False, header=True)\n",
    "\n",
    "# print(\"Results saved to 'result/similar_movies.csv'\")"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
     "### 其次根据keywords计算"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 514,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(45435, 12596)"
      ]
     },
     "execution_count": 514,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "from sklearn.feature_extraction.text import TfidfVectorizer\n",
    "\n",
    "# 定义一个移除所有english stop words如'the', 'a'的转换器\n",
    "tfidf = TfidfVectorizer(stop_words='english')\n",
    "# 清理 NaN 值，将其替换为空字符串\n",
    "keywords_list = (merged_data['keywords']).tolist()\n",
    "keywords_list.append(new_movie_title)\n",
    "# # 打印前5个组合后的结果以进行验证\n",
    "# print(\"前5个组合后的结果:\", keywords_list[:5])\n",
    "\n",
    "# 构建 TF-IDF 矩阵\n",
    "tfidf = TfidfVectorizer(stop_words='english')\n",
    "tfidf_matrix_2 = tfidf.fit_transform(keywords_list)\n",
    "\n",
    "# 输出矩阵形状\n",
    "tfidf_matrix_2.shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 515,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "新关键词素集合中在其他电影词素集合中出现的词素集合:\n",
      "{'indie', 'films'}\n"
     ]
    }
   ],
   "source": [
    "# 获取词汇表\n",
    "feature_names = tfidf.get_feature_names_out()\n",
    "\n",
    "# 获取新keywords的TF-IDF向量\n",
    "new_keywords_vector = tfidf_matrix_2[-1]\n",
    "\n",
    "# 将TF-IDF向量转换为词素集合\n",
    "new_keywords_tokens = set(feature_names[i] for i in new_keywords_vector.indices)\n",
    "\n",
    "# 获取所有其他电影的词素集合\n",
    "other_keywords_tokens = set()\n",
    "for doc in tfidf_matrix_2[:-1]:\n",
    "    other_keywords_tokens.update(feature_names[i] for i in doc.indices)\n",
    "\n",
    "# 计算新电影词素集合中在其他电影词素集合中出现的词素集合\n",
    "common_tokens_2 = new_keywords_tokens.intersection(other_keywords_tokens)\n",
    "\n",
    "# 输出新电影词素集合中在其他电影词素集合中出现的词素集合\n",
    "print(\"新关键词素集合中在其他电影词素集合中出现的词素集合:\")\n",
    "print(common_tokens_2)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 516,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 计算余弦相似度，仅计算新电影与其他关键词的相似度\n",
    "cosine_sim_2 = linear_kernel(tfidf_matrix_2[-1:], tfidf_matrix_2[:-1])\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 517,
   "metadata": {},
   "outputs": [],
   "source": [
    "###把他们合起来"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 518,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "两个集合的交集:\n",
      "{'indie', 'films'}\n"
     ]
    }
   ],
   "source": [
     "# Tokens present in both matched sets (title/overview tokens AND\n",
     "# keyword tokens).\n",
     "intersection_tokens = common_tokens.intersection(common_tokens_2)\n",
     "\n",
     "# Report the intersection.\n",
     "print(\"两个集合的交集:\")\n",
     "print(intersection_tokens)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 519,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "两个集合的并集:\n",
      "{'indie', 'films'}\n"
     ]
    }
   ],
   "source": [
    "##计算集合的并集\n",
    "# 使用 union() 方法\n",
    "union_set = common_tokens.union(common_tokens_2)\n",
    "union_set = union_set.union(intersection_tokens)\n",
    "# 输出两个集合的交集\n",
    "print(\"两个集合的并集:\")\n",
    "print(intersection_tokens)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 520,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "2\n",
      "2\n",
      "2\n",
      "2\n",
      "2\n",
      "{'indie', 'films'}\n"
     ]
    }
   ],
   "source": [
    "print(len(common_tokens_2))\n",
    "print(len(common_tokens))\n",
    "print(len(intersection_tokens))\n",
    "print(len(union_set))\n",
    "print(len(new_movie_tokens))\n",
    "print(new_movie_tokens)"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
     "# 做一个合并"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 521,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "0.5\n"
     ]
    }
   ],
   "source": [
     "## Heuristic choice of the blending weight C2, used later to mix the two\n",
     "## cosine scores as (1-C2)*title_score + C2*keyword_score.\n",
     "if(len(common_tokens_2)==len(intersection_tokens)==len(union_set)):\n",
     "    # Every matched token is shared by both sources: weight them equally.\n",
     "    C2 = 0.5\n",
     "elif(len(common_tokens_2)==len(intersection_tokens)==0):\n",
     "    # No keyword overlap at all: rely entirely on the title/overview score.\n",
     "    C2 = 0\n",
     "elif(len(common_tokens)==len(intersection_tokens)==0):\n",
     "    # No title/overview overlap: rely entirely on the keyword score.\n",
     "    C2 = 1\n",
     "else:\n",
     "    # NOTE(review): '/' binds tighter than '+', so this computes\n",
     "    # len2 + (len_int / 2), not (len2 + len_int) / 2 -- confirm intended.\n",
     "    C2 = (len(common_tokens_2)+len(intersection_tokens)/2)/(len(common_tokens)+len(common_tokens_2))\n",
     "print(C2)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 522,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "             id                                              title popularity  \\\n",
      "40135  287982.0  vhs massacre: cult films and the decline of ph...    1.39872   \n",
      "15599   42039.0                             all tomorrow's parties   0.413309   \n",
      "19783   80215.0                              indie game: the movie   7.843916   \n",
      "13934   38971.0                             pavement: slow century   0.042737   \n",
      "39776   56531.0                 the life and death of a porno gang   1.990855   \n",
      "43548  366143.0                            binky nelson unpacified   3.232132   \n",
      "5511    11868.0                                  horror of dracula   8.355326   \n",
      "38852  376916.0                                          ten years   1.042834   \n",
      "27780  162592.0                  the lumière brothers' first films   0.036823   \n",
      "44026   67742.0                                              s&man   0.637189   \n",
      "\n",
      "      vote_average vote_count   ad     score  \n",
      "40135          5.4        7.0  1.0  0.301428  \n",
      "15599          7.0        2.0  1.0  0.234873  \n",
      "19783          7.4      208.0  1.0  0.193855  \n",
      "13934         10.0        1.0  1.0  0.182594  \n",
      "39776          6.5       16.0  1.0  0.155337  \n",
      "43548          5.7       12.0  1.0  0.154581  \n",
      "5511           7.2      124.0  1.0  0.142704  \n",
      "38852          7.2        5.0  1.0  0.134611  \n",
      "27780          7.0        1.0  1.0  0.130904  \n",
      "44026          4.9        8.0  1.0  0.121064  \n"
     ]
    }
   ],
   "source": [
    "# 余弦结果获取与输入最相似的电影\n",
    "sim_scores_1 = list(enumerate(cosine_sim_1[0]))\n",
    "# 获余弦结果取与输入最相似的关键词\n",
    "sim_scores_2 = list(enumerate(cosine_sim_2[0]))\n",
    "\n",
    "# 计算加权平均相似度分数\n",
    "# 每个相似度分数列表的权重都是0.5\n",
    "combined_sim_scores = [(idx, (1-C2)* score1 + C2 * score2) for (idx, score1), (_, score2) in zip(sim_scores_1, sim_scores_2)]\n",
    "\n",
    "# 对合并后的相似度分数进行排序，取得最相似的条目\n",
    "combined_sim_scores = sorted(combined_sim_scores, key=lambda x: x[1], reverse=True)\n",
    "\n",
    "# 筛选出分数大于0的条目\n",
    "non_zero_scores = [item for item in combined_sim_scores if item[1] > 0]\n",
    "\n",
    "# 对这些条目按分数进行降序排序\n",
    "non_zero_scores.sort(key=lambda x: x[1], reverse=True)\n",
    "\n",
    "top_n_indexes = [item[0] for item in non_zero_scores]\n",
    "\n",
    "scores = [score[1] for score in non_zero_scores]\n",
    "\n",
    "# 从merged_data中获取相应的id和title\n",
    "top_n_data = merged_data.loc[top_n_indexes, ['id', 'title','popularity','vote_average','vote_count','ad']]\n",
    "\n",
    "# 添加分数列\n",
    "top_n_data['score'] = scores  # 直接添加列\n",
    "top_n_data = top_n_data[:10]\n",
    "# 显示结果\n",
    "print(top_n_data)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 523,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "           id                                              title  popularity  \\\n",
      "40135  287982  vhs massacre: cult films and the decline of ph...    1.398720   \n",
      "15599   42039                             all tomorrow's parties    0.413309   \n",
      "19783   80215                              indie game: the movie    7.843916   \n",
      "13934   38971                             pavement: slow century    0.042737   \n",
      "39776   56531                 the life and death of a porno gang    1.990855   \n",
      "43548  366143                            binky nelson unpacified    3.232132   \n",
      "5511    11868                                  horror of dracula    8.355326   \n",
      "38852  376916                                          ten years    1.042834   \n",
      "27780  162592                  the lumière brothers' first films    0.036823   \n",
      "44026   67742                                              s&man    0.637189   \n",
      "\n",
      "       vote_average  vote_count   ad     score  \n",
      "40135           5.4         7.0  1.0  0.301428  \n",
      "15599           7.0         2.0  1.0  0.234873  \n",
      "19783           7.4       208.0  1.0  0.193855  \n",
      "13934          10.0         1.0  1.0  0.182594  \n",
      "39776           6.5        16.0  1.0  0.155337  \n",
      "43548           5.7        12.0  1.0  0.154581  \n",
      "5511            7.2       124.0  1.0  0.142704  \n",
      "38852           7.2         5.0  1.0  0.134611  \n",
      "27780           7.0         1.0  1.0  0.130904  \n",
      "44026           4.9         8.0  1.0  0.121064  \n"
     ]
    }
   ],
   "source": [
     "# Step 1: coerce the id column to numeric (ids may be strings/floats).\n",
     "top_n_data['id'] = pd.to_numeric(top_n_data['id'], errors='coerce')\n",
     "\n",
     "# Step 2: drop rows whose id could not be parsed (NaN after coercion).\n",
     "top_n_data = top_n_data.dropna(subset=['id'])\n",
     "\n",
     "# Step 3: cast the float ids to plain integers (drops the fraction).\n",
     "top_n_data['id'] = top_n_data['id'].astype(int)\n",
     "\n",
     "# Make sure every column used in the ranking computation is numeric.\n",
     "columns_to_convert = ['popularity', 'vote_average', 'vote_count', 'score','ad']\n",
     "for column in columns_to_convert:\n",
     "    top_n_data[column] = pd.to_numeric(top_n_data[column], errors='coerce')\n",
     "\n",
     "# Drop rows where any conversion produced NaN.\n",
     "top_n_data = top_n_data.dropna(subset=columns_to_convert)\n",
     "print(top_n_data)\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 524,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "           id                                              title  weighted_sum\n",
      "19783   80215                              indie game: the movie     14.730353\n",
      "13934   38971                             pavement: slow century     13.514983\n",
      "40135  287982  vhs massacre: cult films and the decline of ph...     11.927973\n",
      "5511    11868                                  horror of dracula     11.403808\n",
      "15599   42039                             all tomorrow's parties     10.758695\n",
      "39776   56531                 the life and death of a porno gang      6.384453\n",
      "38852  376916                                          ten years      5.984916\n",
      "43548  366143                            binky nelson unpacified      5.500646\n",
      "27780  162592                  the lumière brothers' first films      4.763209\n",
      "44026   67742                                              s&man      0.494678\n"
     ]
    }
   ],
   "source": [
    "# 定义映射范围和权重参数\n",
    "mapping = {\n",
    "    'popularity': {'range': (0, 10), 'weight': 0.5},\n",
    "    'ad': {'range': (1, 1), 'weight': 0.1},\n",
    "    'vote_average': {'range': (0, 10), 'weight': 1},\n",
    "    'vote_count': {'range': (0, 10), 'weight': 0.1},\n",
    "    'score': {'range': (0, 1), 'weight': 10}\n",
    "}\n",
    "\n",
    "def map_to_range(value, original_min, original_max, target_min, target_max):\n",
    "    \"\"\" 将值从原始范围映射到目标范围 \"\"\"\n",
    "    original_range = original_max - original_min\n",
    "    target_range = target_max - target_min\n",
    "    if original_range == 0:\n",
    "        original_range = target_min\n",
    "    normalized_value = (value - original_min) / original_range  # 归一化到 [0, 1]\n",
    "    mapped_value = target_min + (normalized_value * target_range)  # 映射到目标范围\n",
    "    return mapped_value\n",
    "# 初始化加权和列\n",
    "top_n_data['weighted_sum'] = 0\n",
    "\n",
    "# 原始范围可以从数据的实际最小值和最大值中计算\n",
    "for column, params in mapping.items():\n",
    "    target_min, target_max = params['range']\n",
    "    weight = params['weight']\n",
    "    original_min = top_n_data[column].min()\n",
    "    original_max = top_n_data[column].max()\n",
    "    \n",
    "    # 映射值并计算加权和\n",
    "    top_n_data['weighted_sum'] += map_to_range(top_n_data[column], original_min, original_max, target_min, target_max) * weight\n",
    "    #print( map_to_range(top_n_data[column], original_min, original_max, target_min, target_max))\n",
    "\n",
    "# 根据加权和排序\n",
    "data_sorted = top_n_data.sort_values(by='weighted_sum', ascending=False)\n",
    "\n",
    "# 输出排序后的数据\n",
    "print(data_sorted[['id', 'title', 'weighted_sum']])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 525,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[80215, 38971, 287982, 11868, 42039, 56531, 376916, 366143, 162592, 67742]\n"
     ]
    }
   ],
   "source": [
    "# 提取 id 列\n",
    "id_list = data_sorted['id'].tolist()\n",
    "\n",
    "# 输出 id 列表\n",
    "print(id_list)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 526,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "[NbConvertApp] Converting notebook a.ipynb to python\n",
      "[NbConvertApp] Writing 8509 bytes to a.py\n"
     ]
    }
   ],
   "source": [
    "try:\n",
    "    !jupyter nbconvert --to python a\n",
    "except:\n",
    "    pass"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "pytorch",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.17"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
