{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 33,
   "metadata": {},
   "outputs": [],
   "source": [
    "import pandas as pd\n",
    "import numpy as np\n",
    "from sklearn.metrics.pairwise import linear_kernel\n",
    "from collections import Counter\n",
    "from spellchecker import SpellChecker\n",
    "# NOTE: TfidfVectorizer was imported twice here; the duplicate was removed.\n",
    "from sklearn.feature_extraction.text import TfidfVectorizer\n",
    "import ast\n",
    "import re"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 34,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Load the preprocessed merged movie dataset.\n",
    "# na_filter=False keeps empty fields as '' rather than NaN, so the string\n",
    "# concatenations in the recommenders below do not propagate NaN.\n",
    "merged_data = pd.read_csv(\"updata/merge_data.csv\", na_filter=False)"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "调参测试"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "迭代1代码"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 35,
   "metadata": {},
   "outputs": [],
   "source": [
    "def al(search,merged_data):\n",
    "    \"\"\"Recommend movie ids for a free-text search query (iteration 1).\n",
    "\n",
    "    Blends two TF-IDF cosine similarities (a title/comb/overview corpus and\n",
    "    a keywords corpus) with an adaptive weight C2, then re-ranks the top 10\n",
    "    hits by a weighted sum of popularity / rating / vote-count features.\n",
    "\n",
    "    Parameters\n",
    "    ----------\n",
    "    search : str\n",
    "        Query text; non-alphanumeric characters are stripped first.\n",
    "    merged_data : pandas.DataFrame\n",
    "        Needs columns: id, title, comb, overview, keywords, popularity,\n",
    "        vote_average, vote_count, ad.\n",
    "\n",
    "    Returns\n",
    "    -------\n",
    "    list\n",
    "        Recommended movie ids, best match first.\n",
    "    \"\"\"\n",
    "    # Strip non-alphanumeric characters from the query.\n",
    "    new_movie_title = re.sub(r'[^a-zA-Z0-9\\s]', '', search)\n",
    "\n",
    "    # One vectorizer (re-fit per corpus) that removes English stop words.\n",
    "    tfidf = TfidfVectorizer(stop_words='english')\n",
    "\n",
    "    # Corpus 1: title (repeated x5 to up-weight it) + comb + overview.\n",
    "    title_list = ((merged_data['title']+' ')*5+ ' '+ (merged_data['comb'])+' '+(merged_data['overview']+' ')).tolist()\n",
    "    title_list.append(new_movie_title)\n",
    "\n",
    "    tfidf_matrix = tfidf.fit_transform(title_list)\n",
    "    feature_names = tfidf.get_feature_names_out()\n",
    "\n",
    "    # Token set of the query document (last row of the matrix).\n",
    "    new_movie_tokens = set(feature_names[i] for i in tfidf_matrix[-1].indices)\n",
    "\n",
    "    # Token set over all movie documents.\n",
    "    other_movie_tokens = set()\n",
    "    for doc in tfidf_matrix[:-1]:\n",
    "        other_movie_tokens.update(feature_names[i] for i in doc.indices)\n",
    "\n",
    "    # Query tokens that also occur somewhere in the movie corpus.\n",
    "    common_tokens = new_movie_tokens.intersection(other_movie_tokens)\n",
    "\n",
    "    # Cosine similarity of the query against every movie document.\n",
    "    cosine_sim_1 = linear_kernel(tfidf_matrix[-1:], tfidf_matrix[:-1])\n",
    "    sim_scores_1 = list(enumerate(cosine_sim_1[0]))\n",
    "\n",
    "    # Corpus 2: keywords only.\n",
    "    keywords_list = (merged_data['keywords']).tolist()\n",
    "    keywords_list.append(new_movie_title)\n",
    "\n",
    "    tfidf_matrix_2 = tfidf.fit_transform(keywords_list)\n",
    "    feature_names = tfidf.get_feature_names_out()\n",
    "\n",
    "    new_keywords_tokens = set(feature_names[i] for i in tfidf_matrix_2[-1].indices)\n",
    "\n",
    "    other_keywords_tokens = set()\n",
    "    for doc in tfidf_matrix_2[:-1]:\n",
    "        other_keywords_tokens.update(feature_names[i] for i in doc.indices)\n",
    "\n",
    "    common_tokens_2 = new_keywords_tokens.intersection(other_keywords_tokens)\n",
    "\n",
    "    cosine_sim_2 = linear_kernel(tfidf_matrix_2[-1:], tfidf_matrix_2[:-1])\n",
    "    sim_scores_2 = list(enumerate(cosine_sim_2[0]))\n",
    "\n",
    "    # Adaptive blend weight C2 derived from token-overlap statistics.\n",
    "    intersection_tokens = common_tokens.intersection(common_tokens_2)\n",
    "    union_set = common_tokens.union(common_tokens_2).union(intersection_tokens)\n",
    "\n",
    "    if(len(common_tokens_2)==len(intersection_tokens)==len(union_set)):\n",
    "        C2 = 0.5\n",
    "    elif(len(common_tokens_2)==len(intersection_tokens)==0):\n",
    "        C2 = 0\n",
    "    elif(len(common_tokens)==len(intersection_tokens)==0):\n",
    "        C2 = 1\n",
    "    else:\n",
    "        # NOTE(review): by operator precedence only len(intersection_tokens)\n",
    "        # is halved here -- confirm this matches the intended formula.\n",
    "        C2 = (len(common_tokens_2)+len(intersection_tokens)/2)/(len(common_tokens)+len(common_tokens_2))\n",
    "\n",
    "    # Blend the two similarity lists with weights (1 - C2) and C2.\n",
    "    combined_sim_scores = [(idx, (1-C2)* score1 + C2 * score2) for (idx, score1), (_, score2) in zip(sim_scores_1, sim_scores_2)]\n",
    "\n",
    "    # Keep only strictly positive scores, sorted best-first.\n",
    "    non_zero_scores = sorted((item for item in combined_sim_scores if item[1] > 0), key=lambda x: x[1], reverse=True)\n",
    "\n",
    "    top_n_indexes = [item[0] for item in non_zero_scores]\n",
    "    scores = [item[1] for item in non_zero_scores]\n",
    "\n",
    "    # Pull the matching rows plus the features used for re-ranking.\n",
    "    top_n_data = merged_data.loc[top_n_indexes, ['id', 'title','popularity','vote_average','vote_count','ad']]\n",
    "    top_n_data['score'] = scores\n",
    "    top_n_data = top_n_data[:10]\n",
    "\n",
    "    # Coerce id to numeric, drop unparsable rows, then cast to int.\n",
    "    top_n_data['id'] = pd.to_numeric(top_n_data['id'], errors='coerce')\n",
    "    top_n_data = top_n_data.dropna(subset=['id'])\n",
    "    top_n_data['id'] = top_n_data['id'].astype(int)\n",
    "\n",
    "    # Every ranking feature must be numeric; drop rows that are not.\n",
    "    columns_to_convert = ['popularity', 'vote_average', 'vote_count', 'score','ad']\n",
    "    for column in columns_to_convert:\n",
    "        top_n_data[column] = pd.to_numeric(top_n_data[column], errors='coerce')\n",
    "    top_n_data = top_n_data.dropna(subset=columns_to_convert)\n",
    "\n",
    "    # Target range and weight for each ranking feature.\n",
    "    mapping = {\n",
    "        'popularity': {'range': (0, 10), 'weight': 0.5},\n",
    "        'ad': {'range': (1, 1), 'weight': 0.1},\n",
    "        'vote_average': {'range': (0, 10), 'weight': 1},\n",
    "        'vote_count': {'range': (0, 10000), 'weight': 0.0003},\n",
    "        'score': {'range': (0, 1), 'weight': 10}\n",
    "    }\n",
    "\n",
    "    def map_to_range(value, original_min, original_max, target_min, target_max):\n",
    "        \"\"\"Linearly map value from [original_min, original_max] onto [target_min, target_max].\"\"\"\n",
    "        original_range = original_max - original_min\n",
    "        target_range = target_max - target_min\n",
    "        if original_range == 0:\n",
    "            # All observed values are equal: map them all to target_min.\n",
    "            # (The original code divided by target_min instead, which raised\n",
    "            # a division error / produced inf-NaN whenever target_min was 0.)\n",
    "            return value * 0 + target_min\n",
    "        normalized_value = (value - original_min) / original_range\n",
    "        return target_min + normalized_value * target_range\n",
    "\n",
    "    top_n_data['weighted_sum'] = 0\n",
    "\n",
    "    # Map each feature from its observed min/max, accumulate the weighted sum.\n",
    "    for column, params in mapping.items():\n",
    "        target_min, target_max = params['range']\n",
    "        weight = params['weight']\n",
    "        original_min = top_n_data[column].min()\n",
    "        original_max = top_n_data[column].max()\n",
    "        top_n_data['weighted_sum'] += map_to_range(top_n_data[column], original_min, original_max, target_min, target_max) * weight\n",
    "\n",
    "    # Best weighted_sum first; return the ids in that order.\n",
    "    data_sorted = top_n_data.sort_values(by='weighted_sum', ascending=False)\n",
    "    return data_sorted['id'].tolist()\n"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "迭代2代码"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 36,
   "metadata": {},
   "outputs": [],
   "source": [
    "def al2(search,merged_data):\n",
    "    \"\"\"Recommend movie ids for a free-text search query (iteration 2).\n",
    "\n",
    "    Identical pipeline to al(); only the re-ranking weights differ\n",
    "    (vote_count mapped to (0, 10) with weight 0.1).\n",
    "\n",
    "    Parameters\n",
    "    ----------\n",
    "    search : str\n",
    "        Query text; non-alphanumeric characters are stripped first.\n",
    "    merged_data : pandas.DataFrame\n",
    "        Needs columns: id, title, comb, overview, keywords, popularity,\n",
    "        vote_average, vote_count, ad.\n",
    "\n",
    "    Returns\n",
    "    -------\n",
    "    list\n",
    "        Recommended movie ids, best match first.\n",
    "    \"\"\"\n",
    "    # Strip non-alphanumeric characters from the query.\n",
    "    new_movie_title = re.sub(r'[^a-zA-Z0-9\\s]', '', search)\n",
    "\n",
    "    # One vectorizer (re-fit per corpus) that removes English stop words.\n",
    "    tfidf = TfidfVectorizer(stop_words='english')\n",
    "\n",
    "    # Corpus 1: title (repeated x5 to up-weight it) + comb + overview.\n",
    "    title_list = ((merged_data['title']+' ')*5+ ' '+ (merged_data['comb'])+' '+(merged_data['overview']+' ')).tolist()\n",
    "    title_list.append(new_movie_title)\n",
    "\n",
    "    tfidf_matrix = tfidf.fit_transform(title_list)\n",
    "    feature_names = tfidf.get_feature_names_out()\n",
    "\n",
    "    # Token set of the query document (last row of the matrix).\n",
    "    new_movie_tokens = set(feature_names[i] for i in tfidf_matrix[-1].indices)\n",
    "\n",
    "    # Token set over all movie documents.\n",
    "    other_movie_tokens = set()\n",
    "    for doc in tfidf_matrix[:-1]:\n",
    "        other_movie_tokens.update(feature_names[i] for i in doc.indices)\n",
    "\n",
    "    # Query tokens that also occur somewhere in the movie corpus.\n",
    "    common_tokens = new_movie_tokens.intersection(other_movie_tokens)\n",
    "\n",
    "    # Cosine similarity of the query against every movie document.\n",
    "    cosine_sim_1 = linear_kernel(tfidf_matrix[-1:], tfidf_matrix[:-1])\n",
    "    sim_scores_1 = list(enumerate(cosine_sim_1[0]))\n",
    "\n",
    "    # Corpus 2: keywords only.\n",
    "    keywords_list = (merged_data['keywords']).tolist()\n",
    "    keywords_list.append(new_movie_title)\n",
    "\n",
    "    tfidf_matrix_2 = tfidf.fit_transform(keywords_list)\n",
    "    feature_names = tfidf.get_feature_names_out()\n",
    "\n",
    "    new_keywords_tokens = set(feature_names[i] for i in tfidf_matrix_2[-1].indices)\n",
    "\n",
    "    other_keywords_tokens = set()\n",
    "    for doc in tfidf_matrix_2[:-1]:\n",
    "        other_keywords_tokens.update(feature_names[i] for i in doc.indices)\n",
    "\n",
    "    common_tokens_2 = new_keywords_tokens.intersection(other_keywords_tokens)\n",
    "\n",
    "    cosine_sim_2 = linear_kernel(tfidf_matrix_2[-1:], tfidf_matrix_2[:-1])\n",
    "    sim_scores_2 = list(enumerate(cosine_sim_2[0]))\n",
    "\n",
    "    # Adaptive blend weight C2 derived from token-overlap statistics.\n",
    "    intersection_tokens = common_tokens.intersection(common_tokens_2)\n",
    "    union_set = common_tokens.union(common_tokens_2).union(intersection_tokens)\n",
    "\n",
    "    if(len(common_tokens_2)==len(intersection_tokens)==len(union_set)):\n",
    "        C2 = 0.5\n",
    "    elif(len(common_tokens_2)==len(intersection_tokens)==0):\n",
    "        C2 = 0\n",
    "    elif(len(common_tokens)==len(intersection_tokens)==0):\n",
    "        C2 = 1\n",
    "    else:\n",
    "        # NOTE(review): by operator precedence only len(intersection_tokens)\n",
    "        # is halved here -- confirm this matches the intended formula.\n",
    "        C2 = (len(common_tokens_2)+len(intersection_tokens)/2)/(len(common_tokens)+len(common_tokens_2))\n",
    "\n",
    "    # Blend the two similarity lists with weights (1 - C2) and C2.\n",
    "    combined_sim_scores = [(idx, (1-C2)* score1 + C2 * score2) for (idx, score1), (_, score2) in zip(sim_scores_1, sim_scores_2)]\n",
    "\n",
    "    # Keep only strictly positive scores, sorted best-first.\n",
    "    non_zero_scores = sorted((item for item in combined_sim_scores if item[1] > 0), key=lambda x: x[1], reverse=True)\n",
    "\n",
    "    top_n_indexes = [item[0] for item in non_zero_scores]\n",
    "    scores = [item[1] for item in non_zero_scores]\n",
    "\n",
    "    # Pull the matching rows plus the features used for re-ranking.\n",
    "    top_n_data = merged_data.loc[top_n_indexes, ['id', 'title','popularity','vote_average','vote_count','ad']]\n",
    "    top_n_data['score'] = scores\n",
    "    top_n_data = top_n_data[:10]\n",
    "\n",
    "    # Coerce id to numeric, drop unparsable rows, then cast to int.\n",
    "    top_n_data['id'] = pd.to_numeric(top_n_data['id'], errors='coerce')\n",
    "    top_n_data = top_n_data.dropna(subset=['id'])\n",
    "    top_n_data['id'] = top_n_data['id'].astype(int)\n",
    "\n",
    "    # Every ranking feature must be numeric; drop rows that are not.\n",
    "    columns_to_convert = ['popularity', 'vote_average', 'vote_count', 'score','ad']\n",
    "    for column in columns_to_convert:\n",
    "        top_n_data[column] = pd.to_numeric(top_n_data[column], errors='coerce')\n",
    "    top_n_data = top_n_data.dropna(subset=columns_to_convert)\n",
    "\n",
    "    # Target range and weight for each ranking feature (iteration-2 tuning).\n",
    "    mapping = {\n",
    "        'popularity': {'range': (0, 10), 'weight': 0.5},\n",
    "        'ad': {'range': (1, 1), 'weight': 0.1},\n",
    "        'vote_average': {'range': (0, 10), 'weight': 1},\n",
    "        'vote_count': {'range': (0, 10), 'weight': 0.1},\n",
    "        'score': {'range': (0, 1), 'weight': 10}\n",
    "    }\n",
    "\n",
    "    def map_to_range(value, original_min, original_max, target_min, target_max):\n",
    "        \"\"\"Linearly map value from [original_min, original_max] onto [target_min, target_max].\"\"\"\n",
    "        original_range = original_max - original_min\n",
    "        target_range = target_max - target_min\n",
    "        if original_range == 0:\n",
    "            # All observed values are equal: map them all to target_min.\n",
    "            # (The original code divided by target_min instead, which raised\n",
    "            # a division error / produced inf-NaN whenever target_min was 0.)\n",
    "            return value * 0 + target_min\n",
    "        normalized_value = (value - original_min) / original_range\n",
    "        return target_min + normalized_value * target_range\n",
    "\n",
    "    top_n_data['weighted_sum'] = 0\n",
    "\n",
    "    # Map each feature from its observed min/max, accumulate the weighted sum.\n",
    "    for column, params in mapping.items():\n",
    "        target_min, target_max = params['range']\n",
    "        weight = params['weight']\n",
    "        original_min = top_n_data[column].min()\n",
    "        original_max = top_n_data[column].max()\n",
    "        top_n_data['weighted_sum'] += map_to_range(top_n_data[column], original_min, original_max, target_min, target_max) * weight\n",
    "\n",
    "    # Best weighted_sum first; return the ids in that order.\n",
    "    data_sorted = top_n_data.sort_values(by='weighted_sum', ascending=False)\n",
    "    return data_sorted['id'].tolist()\n"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "迭代3"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 37,
   "metadata": {},
   "outputs": [],
   "source": [
    "def al3(search,merged_data):\n",
    "    \"\"\"Recommend movie ids for a free-text search query (iteration 3).\n",
    "\n",
    "    Identical pipeline to al(); only the re-ranking weights differ\n",
    "    (vote_average weight 2, vote_count weight 0.0008). Leftover debug\n",
    "    print statements from tuning were removed.\n",
    "\n",
    "    Parameters\n",
    "    ----------\n",
    "    search : str\n",
    "        Query text; non-alphanumeric characters are stripped first.\n",
    "    merged_data : pandas.DataFrame\n",
    "        Needs columns: id, title, comb, overview, keywords, popularity,\n",
    "        vote_average, vote_count, ad.\n",
    "\n",
    "    Returns\n",
    "    -------\n",
    "    list\n",
    "        Recommended movie ids, best match first.\n",
    "    \"\"\"\n",
    "    # Strip non-alphanumeric characters from the query.\n",
    "    new_movie_title = re.sub(r'[^a-zA-Z0-9\\s]', '', search)\n",
    "\n",
    "    # One vectorizer (re-fit per corpus) that removes English stop words.\n",
    "    tfidf = TfidfVectorizer(stop_words='english')\n",
    "\n",
    "    # Corpus 1: title (repeated x5 to up-weight it) + comb + overview.\n",
    "    title_list = ((merged_data['title']+' ')*5+ ' '+ (merged_data['comb'])+' '+(merged_data['overview']+' ')).tolist()\n",
    "    title_list.append(new_movie_title)\n",
    "\n",
    "    tfidf_matrix = tfidf.fit_transform(title_list)\n",
    "    feature_names = tfidf.get_feature_names_out()\n",
    "\n",
    "    # Token set of the query document (last row of the matrix).\n",
    "    new_movie_tokens = set(feature_names[i] for i in tfidf_matrix[-1].indices)\n",
    "\n",
    "    # Token set over all movie documents.\n",
    "    other_movie_tokens = set()\n",
    "    for doc in tfidf_matrix[:-1]:\n",
    "        other_movie_tokens.update(feature_names[i] for i in doc.indices)\n",
    "\n",
    "    # Query tokens that also occur somewhere in the movie corpus.\n",
    "    common_tokens = new_movie_tokens.intersection(other_movie_tokens)\n",
    "\n",
    "    # Cosine similarity of the query against every movie document.\n",
    "    cosine_sim_1 = linear_kernel(tfidf_matrix[-1:], tfidf_matrix[:-1])\n",
    "    sim_scores_1 = list(enumerate(cosine_sim_1[0]))\n",
    "\n",
    "    # Corpus 2: keywords only.\n",
    "    keywords_list = (merged_data['keywords']).tolist()\n",
    "    keywords_list.append(new_movie_title)\n",
    "\n",
    "    tfidf_matrix_2 = tfidf.fit_transform(keywords_list)\n",
    "    feature_names = tfidf.get_feature_names_out()\n",
    "\n",
    "    new_keywords_tokens = set(feature_names[i] for i in tfidf_matrix_2[-1].indices)\n",
    "\n",
    "    other_keywords_tokens = set()\n",
    "    for doc in tfidf_matrix_2[:-1]:\n",
    "        other_keywords_tokens.update(feature_names[i] for i in doc.indices)\n",
    "\n",
    "    common_tokens_2 = new_keywords_tokens.intersection(other_keywords_tokens)\n",
    "\n",
    "    cosine_sim_2 = linear_kernel(tfidf_matrix_2[-1:], tfidf_matrix_2[:-1])\n",
    "    sim_scores_2 = list(enumerate(cosine_sim_2[0]))\n",
    "\n",
    "    # Adaptive blend weight C2 derived from token-overlap statistics.\n",
    "    intersection_tokens = common_tokens.intersection(common_tokens_2)\n",
    "    union_set = common_tokens.union(common_tokens_2).union(intersection_tokens)\n",
    "\n",
    "    if(len(common_tokens_2)==len(intersection_tokens)==len(union_set)):\n",
    "        C2 = 0.5\n",
    "    elif(len(common_tokens_2)==len(intersection_tokens)==0):\n",
    "        C2 = 0\n",
    "    elif(len(common_tokens)==len(intersection_tokens)==0):\n",
    "        C2 = 1\n",
    "    else:\n",
    "        # NOTE(review): by operator precedence only len(intersection_tokens)\n",
    "        # is halved here -- confirm this matches the intended formula.\n",
    "        C2 = (len(common_tokens_2)+len(intersection_tokens)/2)/(len(common_tokens)+len(common_tokens_2))\n",
    "\n",
    "    # Blend the two similarity lists with weights (1 - C2) and C2.\n",
    "    combined_sim_scores = [(idx, (1-C2)* score1 + C2 * score2) for (idx, score1), (_, score2) in zip(sim_scores_1, sim_scores_2)]\n",
    "\n",
    "    # Keep only strictly positive scores, sorted best-first.\n",
    "    non_zero_scores = sorted((item for item in combined_sim_scores if item[1] > 0), key=lambda x: x[1], reverse=True)\n",
    "\n",
    "    top_n_indexes = [item[0] for item in non_zero_scores]\n",
    "    scores = [item[1] for item in non_zero_scores]\n",
    "\n",
    "    # Pull the matching rows plus the features used for re-ranking.\n",
    "    top_n_data = merged_data.loc[top_n_indexes, ['id', 'title','popularity','vote_average','vote_count','ad']]\n",
    "    top_n_data['score'] = scores\n",
    "    top_n_data = top_n_data[:10]\n",
    "\n",
    "    # Coerce id to numeric, drop unparsable rows, then cast to int.\n",
    "    top_n_data['id'] = pd.to_numeric(top_n_data['id'], errors='coerce')\n",
    "    top_n_data = top_n_data.dropna(subset=['id'])\n",
    "    top_n_data['id'] = top_n_data['id'].astype(int)\n",
    "\n",
    "    # Every ranking feature must be numeric; drop rows that are not.\n",
    "    columns_to_convert = ['popularity', 'vote_average', 'vote_count', 'score','ad']\n",
    "    for column in columns_to_convert:\n",
    "        top_n_data[column] = pd.to_numeric(top_n_data[column], errors='coerce')\n",
    "    top_n_data = top_n_data.dropna(subset=columns_to_convert)\n",
    "\n",
    "    # Target range and weight for each ranking feature (iteration-3 tuning).\n",
    "    mapping = {\n",
    "        'popularity': {'range': (0, 10), 'weight': 0.5},\n",
    "        'ad': {'range': (1, 1), 'weight': 0.1},\n",
    "        'vote_average': {'range': (0, 10), 'weight': 2},\n",
    "        'vote_count': {'range': (0, 10000), 'weight': 0.0008},\n",
    "        'score': {'range': (0, 1), 'weight': 10}\n",
    "    }\n",
    "\n",
    "    def map_to_range(value, original_min, original_max, target_min, target_max):\n",
    "        \"\"\"Linearly map value from [original_min, original_max] onto [target_min, target_max].\"\"\"\n",
    "        original_range = original_max - original_min\n",
    "        target_range = target_max - target_min\n",
    "        if original_range == 0:\n",
    "            # All observed values are equal: map them all to target_min.\n",
    "            # (The original code divided by target_min instead, which raised\n",
    "            # a division error / produced inf-NaN whenever target_min was 0.)\n",
    "            return value * 0 + target_min\n",
    "        normalized_value = (value - original_min) / original_range\n",
    "        return target_min + normalized_value * target_range\n",
    "\n",
    "    top_n_data['weighted_sum'] = 0\n",
    "\n",
    "    # Map each feature from its observed min/max, accumulate the weighted sum.\n",
    "    for column, params in mapping.items():\n",
    "        target_min, target_max = params['range']\n",
    "        weight = params['weight']\n",
    "        original_min = top_n_data[column].min()\n",
    "        original_max = top_n_data[column].max()\n",
    "        top_n_data['weighted_sum'] += map_to_range(top_n_data[column], original_min, original_max, target_min, target_max) * weight\n",
    "\n",
    "    # Best weighted_sum first; return the ids in that order.\n",
    "    data_sorted = top_n_data.sort_values(by='weighted_sum', ascending=False)\n",
    "    return data_sorted['id'].tolist()\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 38,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Example manual check (disabled): al3(\"Fantasy Films\", merged_data)"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "基准代码"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 39,
   "metadata": {},
   "outputs": [],
   "source": [
    "def baseal(search,merged_data):\n",
    "    \"\"\"Baseline recommender: blended TF-IDF similarity only.\n",
    "\n",
    "    Same similarity pipeline as al()/al2()/al3(), but returns the ids in\n",
    "    pure similarity order -- no feature-weighted re-ranking step.\n",
    "\n",
    "    Parameters\n",
    "    ----------\n",
    "    search : str\n",
    "        Query text; non-alphanumeric characters are stripped first.\n",
    "    merged_data : pandas.DataFrame\n",
    "        Needs columns: id, title, comb, overview, keywords, popularity,\n",
    "        vote_average, vote_count, ad.\n",
    "\n",
    "    Returns\n",
    "    -------\n",
    "    list\n",
    "        Up to 10 recommended movie ids, best match first.\n",
    "    \"\"\"\n",
    "    # Strip non-alphanumeric characters from the query.\n",
    "    new_movie_title = re.sub(r'[^a-zA-Z0-9\\s]', '', search)\n",
    "\n",
    "    # One vectorizer (re-fit per corpus) that removes English stop words.\n",
    "    tfidf = TfidfVectorizer(stop_words='english')\n",
    "\n",
    "    # Corpus 1: title (repeated x5 to up-weight it) + comb + overview.\n",
    "    title_list = ((merged_data['title']+' ')*5+ ' '+ (merged_data['comb'])+' '+(merged_data['overview']+' ')).tolist()\n",
    "    title_list.append(new_movie_title)\n",
    "\n",
    "    tfidf_matrix = tfidf.fit_transform(title_list)\n",
    "    feature_names = tfidf.get_feature_names_out()\n",
    "\n",
    "    # Token set of the query document (last row of the matrix).\n",
    "    new_movie_tokens = set(feature_names[i] for i in tfidf_matrix[-1].indices)\n",
    "\n",
    "    # Token set over all movie documents.\n",
    "    other_movie_tokens = set()\n",
    "    for doc in tfidf_matrix[:-1]:\n",
    "        other_movie_tokens.update(feature_names[i] for i in doc.indices)\n",
    "\n",
    "    # Query tokens that also occur somewhere in the movie corpus.\n",
    "    common_tokens = new_movie_tokens.intersection(other_movie_tokens)\n",
    "\n",
    "    # Cosine similarity of the query against every movie document.\n",
    "    cosine_sim_1 = linear_kernel(tfidf_matrix[-1:], tfidf_matrix[:-1])\n",
    "    sim_scores_1 = list(enumerate(cosine_sim_1[0]))\n",
    "\n",
    "    # Corpus 2: keywords only.\n",
    "    keywords_list = (merged_data['keywords']).tolist()\n",
    "    keywords_list.append(new_movie_title)\n",
    "\n",
    "    tfidf_matrix_2 = tfidf.fit_transform(keywords_list)\n",
    "    feature_names = tfidf.get_feature_names_out()\n",
    "\n",
    "    new_keywords_tokens = set(feature_names[i] for i in tfidf_matrix_2[-1].indices)\n",
    "\n",
    "    other_keywords_tokens = set()\n",
    "    for doc in tfidf_matrix_2[:-1]:\n",
    "        other_keywords_tokens.update(feature_names[i] for i in doc.indices)\n",
    "\n",
    "    common_tokens_2 = new_keywords_tokens.intersection(other_keywords_tokens)\n",
    "\n",
    "    cosine_sim_2 = linear_kernel(tfidf_matrix_2[-1:], tfidf_matrix_2[:-1])\n",
    "    sim_scores_2 = list(enumerate(cosine_sim_2[0]))\n",
    "\n",
    "    # Adaptive blend weight C2 derived from token-overlap statistics.\n",
    "    intersection_tokens = common_tokens.intersection(common_tokens_2)\n",
    "    union_set = common_tokens.union(common_tokens_2).union(intersection_tokens)\n",
    "\n",
    "    if(len(common_tokens_2)==len(intersection_tokens)==len(union_set)):\n",
    "        C2 = 0.5\n",
    "    elif(len(common_tokens_2)==len(intersection_tokens)==0):\n",
    "        C2 = 0\n",
    "    elif(len(common_tokens)==len(intersection_tokens)==0):\n",
    "        C2 = 1\n",
    "    else:\n",
    "        # NOTE(review): by operator precedence only len(intersection_tokens)\n",
    "        # is halved here -- confirm this matches the intended formula.\n",
    "        C2 = (len(common_tokens_2)+len(intersection_tokens)/2)/(len(common_tokens)+len(common_tokens_2))\n",
    "\n",
    "    # Blend the two similarity lists with weights (1 - C2) and C2.\n",
    "    combined_sim_scores = [(idx, (1-C2)* score1 + C2 * score2) for (idx, score1), (_, score2) in zip(sim_scores_1, sim_scores_2)]\n",
    "\n",
    "    # Keep only strictly positive scores, sorted best-first.\n",
    "    non_zero_scores = sorted((item for item in combined_sim_scores if item[1] > 0), key=lambda x: x[1], reverse=True)\n",
    "\n",
    "    top_n_indexes = [item[0] for item in non_zero_scores]\n",
    "    scores = [item[1] for item in non_zero_scores]\n",
    "\n",
    "    # Pull the matching rows; keep the similarity score for reference.\n",
    "    top_n_data = merged_data.loc[top_n_indexes, ['id', 'title','popularity','vote_average','vote_count','ad']]\n",
    "    top_n_data['score'] = scores\n",
    "    top_n_data = top_n_data[:10]\n",
    "\n",
    "    # Coerce id to numeric, drop unparsable rows, then cast to int.\n",
    "    top_n_data['id'] = pd.to_numeric(top_n_data['id'], errors='coerce')\n",
    "    top_n_data = top_n_data.dropna(subset=['id'])\n",
    "    top_n_data['id'] = top_n_data['id'].astype(int)\n",
    "\n",
    "    # Baseline output: similarity order, no weighted re-ranking.\n",
    "    return top_n_data['id'].tolist()\n"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "对比代码"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 40,
   "metadata": {},
   "outputs": [],
   "source": [
    "def al4(search, merged_data):\n",
    "    \"\"\"\n",
    "    Recommend movie ids for a free-text search query.\n",
    "\n",
    "    Blends two TF-IDF cosine similarities - one over weighted\n",
    "    title/metadata/overview documents, one over the keywords column -\n",
    "    using an adaptive weight C2, keeps the 10 best matches and re-ranks\n",
    "    them by popularity / vote metrics.\n",
    "\n",
    "    Parameters:\n",
    "    search (str): raw search string entered by the user.\n",
    "    merged_data (DataFrame): movie data with columns 'title', 'comb',\n",
    "        'overview', 'keywords', 'id', 'popularity', 'vote_average',\n",
    "        'vote_count', 'ad'.\n",
    "\n",
    "    Returns:\n",
    "    list[int]: movie ids sorted by the final weighted score.\n",
    "    \"\"\"\n",
    "    # Relies on the module-level imports from the first cell\n",
    "    # (re, TfidfVectorizer, linear_kernel, pandas); the previous\n",
    "    # in-function re-imports were redundant and have been removed.\n",
    "\n",
    "    # Keep only letters, digits and whitespace from the query\n",
    "    movie_title = search\n",
    "    new_movie_title = re.sub(r'[^a-zA-Z0-9\\s]', '', movie_title)\n",
    "\n",
    "    # Vectorizer that removes English stop words such as 'the', 'a'\n",
    "    tfidf = TfidfVectorizer(stop_words='english')\n",
    "\n",
    "    # One document per movie: the title repeated 5 times (title boost)\n",
    "    # plus the combined metadata and the overview\n",
    "    title_list = ((merged_data['title']+' ')*5+ ' '+ (merged_data['comb'])+' '+(merged_data['overview']+' ')).tolist()\n",
    "    title_list.append(new_movie_title)\n",
    "\n",
    "    # Build the TF-IDF matrix (the query is the last row)\n",
    "    tfidf_matrix = tfidf.fit_transform(title_list)\n",
    "\n",
    "    # Vocabulary of the fitted vectorizer\n",
    "    feature_names = tfidf.get_feature_names_out()\n",
    "\n",
    "    # TF-IDF vector of the query\n",
    "    new_movie_vector = tfidf_matrix[-1]\n",
    "\n",
    "    # Token set of the query\n",
    "    new_movie_tokens = set(feature_names[i] for i in new_movie_vector.indices)\n",
    "\n",
    "    # Token set over all movie documents\n",
    "    other_movie_tokens = set()\n",
    "    for doc in tfidf_matrix[:-1]:\n",
    "        other_movie_tokens.update(feature_names[i] for i in doc.indices)\n",
    "\n",
    "    # Query tokens that occur in at least one movie document\n",
    "    common_tokens = new_movie_tokens.intersection(other_movie_tokens)\n",
    "\n",
    "    # Cosine similarity of the query against every movie\n",
    "    cosine_sim_1 = linear_kernel(tfidf_matrix[-1:], tfidf_matrix[:-1])\n",
    "    sim_scores_1 = list(enumerate(cosine_sim_1[0]))\n",
    "\n",
    "    # --- Same pipeline over the keywords column ---\n",
    "    keywords_list = (merged_data['keywords']).tolist()\n",
    "    keywords_list.append(new_movie_title)\n",
    "\n",
    "    # Build the keywords TF-IDF matrix (re-fits the same vectorizer)\n",
    "    tfidf_matrix_2 = tfidf.fit_transform(keywords_list)\n",
    "    feature_names = tfidf.get_feature_names_out()\n",
    "\n",
    "    # TF-IDF vector and token set of the query in keyword space\n",
    "    new_keywords_vector = tfidf_matrix_2[-1]\n",
    "    new_keywords_tokens = set(feature_names[i] for i in new_keywords_vector.indices)\n",
    "\n",
    "    # Token set over all keyword documents\n",
    "    other_keywords_tokens = set()\n",
    "    for doc in tfidf_matrix_2[:-1]:\n",
    "        other_keywords_tokens.update(feature_names[i] for i in doc.indices)\n",
    "\n",
    "    # Query tokens that occur in at least one keyword document\n",
    "    common_tokens_2 = new_keywords_tokens.intersection(other_keywords_tokens)\n",
    "\n",
    "    # Cosine similarity of the query against every keyword document\n",
    "    cosine_sim_2 = linear_kernel(tfidf_matrix_2[-1:], tfidf_matrix_2[:-1])\n",
    "\n",
    "    # Overlap between the two match sets and their union\n",
    "    intersection_tokens = common_tokens.intersection(common_tokens_2)\n",
    "    union_set = common_tokens.union(common_tokens_2)\n",
    "    union_set = union_set.union(intersection_tokens)\n",
    "\n",
    "    # Adaptive weight C2 for the keyword similarity\n",
    "    if len(common_tokens_2) == len(intersection_tokens) == len(union_set):\n",
    "        C2 = 0.5\n",
    "    elif len(common_tokens_2) == len(intersection_tokens) == 0:\n",
    "        C2 = 0\n",
    "    elif len(common_tokens) == len(intersection_tokens) == 0:\n",
    "        C2 = 1\n",
    "    else:\n",
    "        # NOTE(review): precedence evaluates this as\n",
    "        # len(common_tokens_2) + (len(intersection_tokens) / 2);\n",
    "        # confirm whether (len(common_tokens_2) + len(intersection_tokens)) / 2\n",
    "        # was intended before changing it - behavior kept as-is here.\n",
    "        C2 = (len(common_tokens_2) + len(intersection_tokens) / 2) / (len(common_tokens) + len(common_tokens_2))\n",
    "\n",
    "    # Keyword similarity scores, index-aligned with sim_scores_1\n",
    "    # (the duplicate recomputation of sim_scores_1 was removed)\n",
    "    sim_scores_2 = list(enumerate(cosine_sim_2[0]))\n",
    "\n",
    "    # Weighted blend of the two similarity scores\n",
    "    combined_sim_scores = [(idx, (1 - C2) * score1 + C2 * score2) for (idx, score1), (_, score2) in zip(sim_scores_1, sim_scores_2)]\n",
    "\n",
    "    # Sort by blended score, descending\n",
    "    combined_sim_scores = sorted(combined_sim_scores, key=lambda x: x[1], reverse=True)\n",
    "\n",
    "    # Keep only strictly positive scores\n",
    "    non_zero_scores = [item for item in combined_sim_scores if item[1] > 0]\n",
    "\n",
    "    top_n_indexes = [item[0] for item in non_zero_scores]\n",
    "    scores = [score[1] for score in non_zero_scores]\n",
    "\n",
    "    # Fetch the matching rows from merged_data\n",
    "    top_n_data = merged_data.loc[top_n_indexes, ['id', 'title', 'popularity', 'vote_average', 'vote_count', 'ad']]\n",
    "\n",
    "    # Attach the blended score and keep the 10 best rows\n",
    "    top_n_data['score'] = scores\n",
    "    top_n_data = top_n_data[:10]\n",
    "\n",
    "    # Coerce ids to numeric (handles decimal-formatted id strings) ...\n",
    "    top_n_data['id'] = pd.to_numeric(top_n_data['id'], errors='coerce')\n",
    "\n",
    "    # ... drop ids that could not be parsed ...\n",
    "    top_n_data = top_n_data.dropna(subset=['id'])\n",
    "\n",
    "    # ... and truncate the floats to integers\n",
    "    top_n_data['id'] = top_n_data['id'].astype(int)\n",
    "\n",
    "    # Make sure every ranking column is numeric\n",
    "    columns_to_convert = ['popularity', 'vote_average', 'vote_count', 'score', 'ad']\n",
    "    for column in columns_to_convert:\n",
    "        top_n_data[column] = pd.to_numeric(top_n_data[column], errors='coerce')\n",
    "\n",
    "    # Drop rows that failed the numeric conversion\n",
    "    top_n_data = top_n_data.dropna(subset=columns_to_convert)\n",
    "\n",
    "    # Additive factors: mapped value * weight is added to the sum\n",
    "    primary_factors = {\n",
    "        'score': {'range': (0, 1), 'weight': 10},\n",
    "    }\n",
    "\n",
    "    # Multiplicative factors: mapped value * weight scales the sum\n",
    "    secondary_factors = {\n",
    "        'vote_count': {'range': (0, 10000), 'weight': 1/2},\n",
    "        'popularity': {'range': (0, 10), 'weight': 1},\n",
    "        'vote_average': {'range': (0, 10), 'weight': 2},\n",
    "        'ad': {'range': (1, 1), 'weight': 1}\n",
    "    }\n",
    "\n",
    "    def map_to_range(value, original_min, original_max, target_min, target_max):\n",
    "        \"\"\" Map a value from its original range onto a target range \"\"\"\n",
    "        original_range = original_max - original_min\n",
    "        target_range = target_max - target_min\n",
    "        if original_range == 0:\n",
    "            return target_min\n",
    "        normalized_value = (value - original_min) / original_range  # normalize to [0, 1]\n",
    "        mapped_value = target_min + (normalized_value * target_range)  # map onto the target range\n",
    "        return mapped_value\n",
    "\n",
    "    # Start the weighted sum at zero\n",
    "    top_n_data['weighted_sum'] = 0\n",
    "\n",
    "    # Add in the primary (additive) factors\n",
    "    for column, params in primary_factors.items():\n",
    "        target_min, target_max = params['range']\n",
    "        weight = params['weight']\n",
    "        original_min = top_n_data[column].min()\n",
    "        original_max = top_n_data[column].max()\n",
    "\n",
    "        # Map the column into its target range and accumulate\n",
    "        top_n_data['weighted_sum'] += map_to_range(top_n_data[column], original_min, original_max, target_min, target_max) * weight\n",
    "\n",
    "    # Multiply in the secondary factors\n",
    "    for column, params in secondary_factors.items():\n",
    "        target_min, target_max = params['range']\n",
    "        weight = params['weight']\n",
    "        original_min = top_n_data[column].min()\n",
    "        original_max = top_n_data[column].max()\n",
    "\n",
    "        # Map the column into its target range and scale the sum\n",
    "        top_n_data['weighted_sum'] *= map_to_range(top_n_data[column], original_min, original_max, target_min, target_max) * weight\n",
    "\n",
    "    # Rank by the final weighted sum\n",
    "    data_sorted = top_n_data.sort_values(by='weighted_sum', ascending=False)\n",
    "\n",
    "    # Return the ids in ranked order\n",
    "    id_list = data_sorted['id'].tolist()\n",
    "\n",
    "    return id_list\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 41,
   "metadata": {},
   "outputs": [],
   "source": [
    "# movie_title = \"Superhero Movies\"\n",
    "# id_list = al(movie_title,merged_data)"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "迭代1代码"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 42,
   "metadata": {},
   "outputs": [],
   "source": [
    "import pandas as pd\n",
    "\n",
    "def process_and_store_ids_inplace(input_csv_path, merged_data, target_column_index):\n",
    "    \"\"\"\n",
    "    Read search terms from the first column of the input CSV, call `al`\n",
    "    to get an id list for each term, store the results in the column given\n",
    "    by target_column_index, and save back to the same CSV file.\n",
    "\n",
    "    Parameters:\n",
    "    input_csv_path (str): path of the input CSV file.\n",
    "    merged_data (DataFrame): DataFrame holding the movie data.\n",
    "    target_column_index (int): 0-based index of the column to write to.\n",
    "    \"\"\"\n",
    "    # Read the input CSV file\n",
    "    input_df = pd.read_csv(input_csv_path)\n",
    "\n",
    "    # Number of columns in the input CSV\n",
    "    # (bug fix: shape[0] is the row count; shape[1] is the column count)\n",
    "    num_columns = input_df.shape[1]\n",
    "\n",
    "    # Does the target column already exist?\n",
    "    if target_column_index >= num_columns:\n",
    "        # Out of range: append a new column at the end\n",
    "        new_column_name = f'Column_{target_column_index}'\n",
    "        input_df[new_column_name] = ''\n",
    "    else:\n",
    "        # In range: reuse the existing column name\n",
    "        new_column_name = input_df.columns[target_column_index]\n",
    "\n",
    "    # Search terms come from the first column\n",
    "    search_terms = input_df.iloc[:, 0]\n",
    "\n",
    "    # One id list per search term\n",
    "    results = []\n",
    "    for search_term in search_terms:\n",
    "        id_list = al(search_term, merged_data)\n",
    "        results.append(id_list)\n",
    "\n",
    "    # Write the results into the target column\n",
    "    input_df[new_column_name] = results\n",
    "\n",
    "    # Save the updated DataFrame back to the input CSV file\n",
    "    input_df.to_csv(input_csv_path, index=False)\n",
    "\n"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "迭代2代码"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 43,
   "metadata": {},
   "outputs": [],
   "source": [
    "import pandas as pd\n",
    "\n",
    "def process_and_store_ids_inplace2(input_csv_path, merged_data, target_column_index):\n",
    "    \"\"\"\n",
    "    Read search terms from the first column of the input CSV, call `al2`\n",
    "    to get an id list for each term, store the results in the column given\n",
    "    by target_column_index, and save back to the same CSV file.\n",
    "\n",
    "    Parameters:\n",
    "    input_csv_path (str): path of the input CSV file.\n",
    "    merged_data (DataFrame): DataFrame holding the movie data.\n",
    "    target_column_index (int): 0-based index of the column to write to.\n",
    "    \"\"\"\n",
    "    # Read the input CSV file\n",
    "    input_df = pd.read_csv(input_csv_path)\n",
    "\n",
    "    # Number of columns in the input CSV\n",
    "    # (bug fix: shape[0] is the row count; shape[1] is the column count)\n",
    "    num_columns = input_df.shape[1]\n",
    "\n",
    "    # Does the target column already exist?\n",
    "    if target_column_index >= num_columns:\n",
    "        # Out of range: append a new column at the end\n",
    "        new_column_name = f'Column_{target_column_index}'\n",
    "        input_df[new_column_name] = ''\n",
    "    else:\n",
    "        # In range: reuse the existing column name\n",
    "        new_column_name = input_df.columns[target_column_index]\n",
    "\n",
    "    # Search terms come from the first column\n",
    "    search_terms = input_df.iloc[:, 0]\n",
    "\n",
    "    # One id list per search term\n",
    "    results = []\n",
    "    for search_term in search_terms:\n",
    "        id_list = al2(search_term, merged_data)\n",
    "        results.append(id_list)\n",
    "\n",
    "    # Write the results into the target column\n",
    "    input_df[new_column_name] = results\n",
    "\n",
    "    # Save the updated DataFrame back to the input CSV file\n",
    "    input_df.to_csv(input_csv_path, index=False)\n",
    "\n"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "迭代3"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 44,
   "metadata": {},
   "outputs": [],
   "source": [
    "import pandas as pd\n",
    "\n",
    "def process_and_store_ids_inplace3(input_csv_path, merged_data, target_column_index):\n",
    "    \"\"\"\n",
    "    Read search terms from the first column of the input CSV, call `al3`\n",
    "    to get an id list for each term, store the results in the column given\n",
    "    by target_column_index, and save back to the same CSV file.\n",
    "\n",
    "    Parameters:\n",
    "    input_csv_path (str): path of the input CSV file.\n",
    "    merged_data (DataFrame): DataFrame holding the movie data.\n",
    "    target_column_index (int): 0-based index of the column to write to.\n",
    "    \"\"\"\n",
    "    # Read the input CSV file\n",
    "    input_df = pd.read_csv(input_csv_path)\n",
    "\n",
    "    # Number of columns in the input CSV\n",
    "    # (bug fix: shape[0] is the row count; shape[1] is the column count)\n",
    "    num_columns = input_df.shape[1]\n",
    "\n",
    "    # Does the target column already exist?\n",
    "    if target_column_index >= num_columns:\n",
    "        # Out of range: append a new column at the end\n",
    "        new_column_name = f'Column_{target_column_index}'\n",
    "        input_df[new_column_name] = ''\n",
    "    else:\n",
    "        # In range: reuse the existing column name\n",
    "        new_column_name = input_df.columns[target_column_index]\n",
    "\n",
    "    # Search terms come from the first column\n",
    "    search_terms = input_df.iloc[:, 0]\n",
    "\n",
    "    # One id list per search term\n",
    "    results = []\n",
    "    for search_term in search_terms:\n",
    "        id_list = al3(search_term, merged_data)\n",
    "        results.append(id_list)\n",
    "\n",
    "    # Write the results into the target column\n",
    "    input_df[new_column_name] = results\n",
    "\n",
    "    # Save the updated DataFrame back to the input CSV file\n",
    "    input_df.to_csv(input_csv_path, index=False)\n",
    "\n"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "归并2迭代1"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 45,
   "metadata": {},
   "outputs": [],
   "source": [
    "import pandas as pd\n",
    "\n",
    "def process_and_store_ids_inplace4(input_csv_path, merged_data, target_column_index):\n",
    "    \"\"\"\n",
    "    Read search terms from the first column of the input CSV, call `al4`\n",
    "    to get an id list for each term, store the results in the column given\n",
    "    by target_column_index, and save back to the same CSV file.\n",
    "\n",
    "    Parameters:\n",
    "    input_csv_path (str): path of the input CSV file.\n",
    "    merged_data (DataFrame): DataFrame holding the movie data.\n",
    "    target_column_index (int): 0-based index of the column to write to.\n",
    "    \"\"\"\n",
    "    # Read the input CSV file\n",
    "    input_df = pd.read_csv(input_csv_path)\n",
    "\n",
    "    # Number of columns in the input CSV\n",
    "    # (bug fix: shape[0] is the row count; shape[1] is the column count)\n",
    "    num_columns = input_df.shape[1]\n",
    "\n",
    "    # Does the target column already exist?\n",
    "    if target_column_index >= num_columns:\n",
    "        # Out of range: append a new column at the end\n",
    "        new_column_name = f'Column_{target_column_index}'\n",
    "        input_df[new_column_name] = ''\n",
    "    else:\n",
    "        # In range: reuse the existing column name\n",
    "        new_column_name = input_df.columns[target_column_index]\n",
    "\n",
    "    # Search terms come from the first column\n",
    "    search_terms = input_df.iloc[:, 0]\n",
    "\n",
    "    # One id list per search term\n",
    "    results = []\n",
    "    for search_term in search_terms:\n",
    "        id_list = al4(search_term, merged_data)\n",
    "        results.append(id_list)\n",
    "\n",
    "    # Write the results into the target column\n",
    "    input_df[new_column_name] = results\n",
    "\n",
    "    # Save the updated DataFrame back to the input CSV file\n",
    "    input_df.to_csv(input_csv_path, index=False)\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 46,
   "metadata": {},
   "outputs": [],
   "source": [
    "import pandas as pd\n",
    "\n",
    "def base_process_and_store_ids_inplace(input_csv_path, merged_data, target_column_index):\n",
    "    \"\"\"\n",
    "    Read search terms from the first column of the input CSV, call `baseal`\n",
    "    to get an id list for each term, store the results in the column given\n",
    "    by target_column_index, and save back to the same CSV file.\n",
    "\n",
    "    Parameters:\n",
    "    input_csv_path (str): path of the input CSV file.\n",
    "    merged_data (DataFrame): DataFrame holding the movie data.\n",
    "    target_column_index (int): 0-based index of the column to write to.\n",
    "    \"\"\"\n",
    "    # Read the input CSV file\n",
    "    input_df = pd.read_csv(input_csv_path)\n",
    "\n",
    "    # Number of columns in the input CSV\n",
    "    # (bug fix: shape[0] is the row count; shape[1] is the column count)\n",
    "    num_columns = input_df.shape[1]\n",
    "\n",
    "    # Does the target column already exist?\n",
    "    if target_column_index >= num_columns:\n",
    "        # Out of range: append a new column at the end\n",
    "        new_column_name = f'Column_{target_column_index}'\n",
    "        input_df[new_column_name] = ''\n",
    "    else:\n",
    "        # In range: reuse the existing column name\n",
    "        new_column_name = input_df.columns[target_column_index]\n",
    "\n",
    "    # Search terms come from the first column\n",
    "    search_terms = input_df.iloc[:, 0]\n",
    "\n",
    "    # One id list per search term\n",
    "    results = []\n",
    "    for search_term in search_terms:\n",
    "        id_list = baseal(search_term, merged_data)\n",
    "        results.append(id_list)\n",
    "\n",
    "    # Write the results into the target column\n",
    "    input_df[new_column_name] = results\n",
    "\n",
    "    # Save the updated DataFrame back to the input CSV file\n",
    "    input_df.to_csv(input_csv_path, index=False)\n",
    "\n"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "猜测"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 47,
   "metadata": {},
   "outputs": [],
   "source": [
    "import pandas as pd\n",
    "\n",
    "def cbase_process_and_store_ids_inplace(input_csv_path, merged_data, target_column_index):\n",
    "    \"\"\"\n",
    "    Read search terms from the first column of the input CSV, call `cbaseal`\n",
    "    to get an id list for each term, store the results in the column given\n",
    "    by target_column_index, and save back to the same CSV file.\n",
    "\n",
    "    Parameters:\n",
    "    input_csv_path (str): path of the input CSV file.\n",
    "    merged_data (DataFrame): DataFrame holding the movie data.\n",
    "    target_column_index (int): 0-based index of the column to write to.\n",
    "    \"\"\"\n",
    "    # Read the input CSV file\n",
    "    input_df = pd.read_csv(input_csv_path)\n",
    "\n",
    "    # Number of columns in the input CSV\n",
    "    # (bug fix: shape[0] is the row count; shape[1] is the column count)\n",
    "    num_columns = input_df.shape[1]\n",
    "\n",
    "    # Does the target column already exist?\n",
    "    if target_column_index >= num_columns:\n",
    "        # Out of range: append a new column at the end\n",
    "        new_column_name = f'Column_{target_column_index}'\n",
    "        input_df[new_column_name] = ''\n",
    "    else:\n",
    "        # In range: reuse the existing column name\n",
    "        new_column_name = input_df.columns[target_column_index]\n",
    "\n",
    "    # Search terms come from the first column\n",
    "    search_terms = input_df.iloc[:, 0]\n",
    "\n",
    "    # One id list per search term\n",
    "    results = []\n",
    "    for search_term in search_terms:\n",
    "        id_list = cbaseal(search_term, merged_data)\n",
    "        results.append(id_list)\n",
    "\n",
    "    # Write the results into the target column\n",
    "    input_df[new_column_name] = results\n",
    "\n",
    "    # Save the updated DataFrame back to the input CSV file\n",
    "    input_df.to_csv(input_csv_path, index=False)\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 48,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "处理完成，结果已保存到输入的 CSV 文件中。\n"
     ]
    }
   ],
   "source": [
    "input_csv_path = \"updata/test.csv\"  # path of the input CSV file\n",
    "# Run the selected variant (other variants left commented out for comparison)\n",
    "# base_process_and_store_ids_inplace(input_csv_path, merged_data,2)\n",
    "# process_and_store_ids_inplace(input_csv_path, merged_data,3)\n",
    "process_and_store_ids_inplace4(input_csv_path, merged_data,6)\n",
    "# cbase_process_and_store_ids_inplace(input_csv_path, merged_data,5)\n",
    "print(\"处理完成，结果已保存到输入的 CSV 文件中。\")"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "pytorch",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.17"
  },
  "orig_nbformat": 4
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
