{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "5accadd9-f8f5-4587-8162-11f2f5c3f4d6",
   "metadata": {},
   "outputs": [],
   "source": [
    "from sklearn.datasets import make_moons\n",
    "import matplotlib.pyplot as plt\n",
    "import numpy as np\n",
    "\n",
    "# Generate synthetic two-moon data\n",
    "X_m, y_m = make_moons(n_samples=300, noise=0.03, random_state=42)\n",
    "\n",
    "# Min-max scale X_m to [0, 1] per feature\n",
    "# (NOTE: the original comment said z-score, but this formula is min-max scaling)\n",
    "X_m = (X_m - np.min(X_m, axis=0)) / (np.max(X_m, axis=0)-np.min(X_m, axis=0))\n",
    "\n",
    "label_size = 15 # axis-label font size\n",
    "ticklabel_size = 12 # tick-label font size\n",
    "\n",
    "fig, ax = plt.subplots(figsize=(7,7))\n",
    "ax.scatter(X_m[:, 0], X_m[:, 1], marker=\"o\", c=y_m, s=7**2, edgecolor=\"k\")\n",
    "plt.axis('off')\n",
    "plt.show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "f8266fc0-f74c-4aaf-8062-17120a94a2a6",
   "metadata": {},
   "outputs": [],
   "source": [
    "min_pts = 3\n",
    "# Pairwise Euclidean distance matrix between all samples\n",
    "Euclidean_distance = np.zeros((X_m.shape[0], X_m.shape[0]))\n",
    "for i in range(Euclidean_distance.shape[0]):\n",
    "    for j in range(Euclidean_distance.shape[1]):\n",
    "        Euclidean_distance[i,j] = np.linalg.norm(X_m[i] - X_m[j])\n",
    "\n",
    "# k-Distance: each point's distance to its min_pts-th nearest neighbor\n",
    "# (column index min_pts skips the zero self-distance in column 0),\n",
    "# sorted in descending order for the elbow plot\n",
    "k_distance = np.sort(np.sort(Euclidean_distance, axis=1)[:, min_pts])[::-1]\n",
    "\n",
    "# Plot the k-Distance graph; the elbow suggests a value for eps\n",
    "fig, ax = plt.subplots(figsize=(8,5))\n",
    "ax.plot(np.arange(1, len(k_distance)+1), k_distance, marker='o', linestyle='-', color='tab:orange')\n",
    "ax.set_xlabel('Samples-Index', fontsize=label_size)\n",
    "ax.set_ylabel('k-Distance', fontsize=label_size)\n",
    "ax.tick_params(axis='both', which='major', labelsize=ticklabel_size) \n",
    "plt.show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "80f41c65-9d73-465d-9d40-967224e7283d",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Initialization (Phase 1: find core points)\n",
    "r = 0.04\n",
    "\n",
    "cluster_lab = np.zeros(X_m.shape[0]) - 1 # every point starts at -1: noise / not yet labeled\n",
    "\n",
    "# Find all core points: a point is core if >= min_pts points lie within radius r\n",
    "noise_id = 0\n",
    "corepoint_id = 0\n",
    "for i in range(len(cluster_lab)):\n",
    "    if np.sum(Euclidean_distance[i, :] <= r) >= min_pts:\n",
    "        cluster_lab[i] = 0 # \"0\" marks a core point not yet assigned to a cluster\n",
    "# Visualize the first few core points found\n",
    "        if corepoint_id< 3:\n",
    "            fig, ax = plt.subplots(figsize=(8,8))\n",
    "\n",
    "            # Plot unlabeled / noise points\n",
    "            un_noise_id = (cluster_lab == -1)\n",
    "            ax.scatter(X_m[un_noise_id, 0], X_m[un_noise_id, 1], marker=\"o\", c=\"green\", s=8**2, edgecolor=\"k\", zorder=0)\n",
    "\n",
    "           # Draw the eps-neighborhood of the current core point\n",
    "            un_corepoint_id = (cluster_lab == 0)\n",
    "            circle = plt.Circle((X_m[i, 0], X_m[i, 1]), r, edgecolor='red', facecolor='tab:red', alpha=0.5, zorder=1)\n",
    "            ax.add_patch(circle)\n",
    "\n",
    "           # Plot core points found so far\n",
    "            ax.scatter(X_m[un_corepoint_id, 0], X_m[un_corepoint_id, 1], marker=\"o\", c='red', s=8**2, edgecolor=\"k\", zorder=2)\n",
    "\n",
    "            ax.set_title(f'Phase 1: Iteration {i}, eps = {r}, minPts = {min_pts}', fontsize=label_size)\n",
    "            plt.axis('off')\n",
    "            plt.show()\n",
    "\n",
    "            corepoint_id += 1\n",
    "            \n",
    "    elif noise_id < 3:\n",
    "        fig, ax = plt.subplots(figsize=(8,8))\n",
    "\n",
    "        # Plot unlabeled / noise points\n",
    "        un_noise_id = (cluster_lab == -1)\n",
    "        ax.scatter(X_m[un_noise_id, 0], X_m[un_noise_id, 1], marker=\"o\", c=\"green\", s=8**2, edgecolor=\"k\", zorder=0)\n",
    "\n",
    "       # Plot core points found so far\n",
    "        un_corepoint_id = (cluster_lab == 0)\n",
    "        ax.scatter(X_m[un_corepoint_id, 0], X_m[un_corepoint_id, 1], marker=\"o\", c='red', s=8**2, edgecolor=\"k\", zorder=2)\n",
    "\n",
    "        # Draw the eps-neighborhood of the current (non-core) point\n",
    "        circle = plt.Circle((X_m[i, 0], X_m[i, 1]), r, edgecolor='blue', facecolor='tab:blue', alpha=0.5, zorder=1)\n",
    "        ax.add_patch(circle)\n",
    "\n",
    "        ax.set_title(f'Phase 1: Iteration {i}, eps = {r}, minPts = {min_pts}', fontsize=label_size)\n",
    "        plt.axis('off')\n",
    "        plt.show()\n",
    "\n",
    "        noise_id += 1\n",
    "\n",
    "# Show all core points found in Phase 1\n",
    "fig, ax = plt.subplots(figsize=(8,8))\n",
    "\n",
    "# Plot unlabeled / noise points\n",
    "un_noise_id= (cluster_lab == -1)\n",
    "ax.scatter(X_m[un_noise_id, 0], X_m[un_noise_id, 1], marker=\"o\", c=\"green\", s=8**2, edgecolor=\"k\", zorder=0)\n",
    "\n",
    "# Plot core points\n",
    "un_corepoint_id = (cluster_lab == 0)\n",
    "ax.scatter(X_m[un_corepoint_id, 0], X_m[un_corepoint_id, 1], marker=\"o\", c='red', s=8**2, edgecolor=\"k\", zorder=2)\n",
    "\n",
    "ax.set_title(f'Phase 1: Final, eps = {r}, minPts = {min_pts}', fontsize=label_size)\n",
    "plt.axis('off')\n",
    "plt.show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "74c40262-257e-4eb3-b586-97347452ddf3",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Phase 2: expand clusters from the unlabeled core points\n",
    "cluster_lab_copy = cluster_lab.copy()\n",
    "core_point_index = np.where(cluster_lab_copy == 0)[0]\n",
    "\n",
    "corepoint_cluster_itridx = 0\n",
    "corepoint_cluster_idx = 0\n",
    "\n",
    "cluster_id = 0 # cluster-label counter\n",
    "\n",
    "# Keep clustering until every core point has been assigned a cluster label\n",
    "while cluster_lab_copy[core_point_index].min() == 0:\n",
    "\n",
    "    cluster_id += 1 # start a new cluster\n",
    "\n",
    "    # Pick a start point; one complete cluster is built per outer-loop pass\n",
    "    for start_point in core_point_index:\n",
    "        if cluster_lab_copy[start_point] == 0:\n",
    "            c_point = np.array([start_point]) # seed the expansion frontier with one point\n",
    "            break\n",
    "\n",
    " # Repeat until the expansion frontier is empty\n",
    "    while  c_point.size > 0:\n",
    "        if cluster_id == 2 and corepoint_cluster_idx < 1:\n",
    "            fig, ax = plt.subplots(figsize=(8,8))\n",
    "\n",
    "            # Plot noise points\n",
    "            noise_point = (cluster_lab_copy == -1)\n",
    "            ax.scatter(X_m[noise_point, 0], X_m[noise_point, 1], marker=\"o\", c=\"k\", s=8**2, edgecolor=\"k\", zorder=0)\n",
    "\n",
    "            # Plot still-unlabeled core points\n",
    "            un_label_id = (cluster_lab_copy == 0)\n",
    "            ax.scatter(X_m[un_label_id, 0], X_m[un_label_id, 1], marker=\"o\", c=\"r\", s=8**2, edgecolor=\"k\", zorder=0)\n",
    "\n",
    "            # Plot cluster-1 core points\n",
    "            cluster1_idx = (cluster_lab_copy == 1)\n",
    "            ax.scatter(X_m[cluster1_idx, 0], X_m[cluster1_idx, 1], marker=\"o\", c='tab:blue', s=8**2, edgecolor=\"k\", zorder=2)\n",
    "\n",
    "            # Plot cluster-2 core points\n",
    "            cluster2_idx = (cluster_lab_copy == 2)\n",
    "            ax.scatter(X_m[cluster2_idx, 0], X_m[cluster2_idx, 1], marker=\"o\", c='yellow', s=8**2, edgecolor=\"k\", zorder=2)\n",
    "\n",
    "            ax.set_title(f'Phase 2: Cluster 1, eps = {r}, minPts = {min_pts}', fontsize=label_size)\n",
    "            plt.axis('off')\n",
    "            plt.show()\n",
    "\n",
    "            corepoint_cluster_idx += 1\n",
    "\n",
    "       # Assign the current cluster label to the first frontier point\n",
    "        cluster_lab_copy[c_point[0]] = cluster_id\n",
    "        circle_id = c_point[0]\n",
    "\n",
    "       # Find neighboring core points of the first frontier point\n",
    "        neighbor_point = np.where(Euclidean_distance[c_point[0]] <= r)[0] # find neighbor points\n",
    "        neighbor_point = neighbor_point[neighbor_point != c_point[0]]# filter out itself\n",
    "        neighbor_point = neighbor_point[cluster_lab_copy[neighbor_point] == 0] # keep only unlabeled core points\n",
    "\n",
    "        # Append the neighbors to the expansion frontier\n",
    "        c_point = np.concatenate((c_point, neighbor_point))\n",
    "\n",
    "       # Remove the just-labeled point from the frontier\n",
    "        c_point = np.delete(c_point, 0)\n",
    "\n",
    "        # Remove duplicated points\n",
    "        c_point = np.unique(c_point)\n",
    "\n",
    "        if corepoint_cluster_itridx < 3:\n",
    "            fig, ax = plt.subplots(figsize=(8,8))\n",
    "\n",
    "           # Plot noise points\n",
    "            noise_point = (cluster_lab_copy == -1)\n",
    "            ax.scatter(X_m[noise_point, 0], X_m[noise_point, 1], marker=\"o\", c=\"k\", s=8**2, edgecolor=\"k\", zorder=0)\n",
    "\n",
    "            # Plot still-unlabeled core points\n",
    "            un_label_id = (cluster_lab_copy == 0)\n",
    "            ax.scatter(X_m[un_label_id, 0], X_m[un_label_id, 1], marker=\"o\", c=\"r\", s=8**2, edgecolor=\"k\", zorder=0)\n",
    "\n",
    "            # Draw the eps-neighborhood of the current core point\n",
    "            circle = plt.Circle((X_m[circle_id, 0], X_m[circle_id, 1]), r, edgecolor='blue', facecolor='tab:blue', alpha=0.5, zorder=1)\n",
    "            ax.add_patch(circle)\n",
    "\n",
    "            # Plot cluster-1 core points\n",
    "            cluster1_idx = (cluster_lab_copy == 1)\n",
    "            ax.scatter(X_m[cluster1_idx, 0], X_m[cluster1_idx, 1], marker=\"o\", c='tab:blue', s=8**2, edgecolor=\"k\", zorder=2)\n",
    "\n",
    "            # Plot cluster-2 core points\n",
    "            cluster2_idx = (cluster_lab_copy == 2)\n",
    "            ax.scatter(X_m[cluster2_idx, 0], X_m[cluster2_idx, 1], marker=\"o\", c='yellow', s=8**2, edgecolor=\"k\", zorder=2)\n",
    "\n",
    "            ax.set_title(f'Phase 2: Iteration {corepoint_cluster_itridx+1}, eps = {r}, minPts = {min_pts}', fontsize=label_size)\n",
    "            plt.axis('off')\n",
    "            plt.show()\n",
    "\n",
    "            corepoint_cluster_itridx += 1\n",
    "\n",
    "# Show the clustering result after Phase 2\n",
    "fig, ax = plt.subplots(figsize=(8,8))\n",
    "\n",
    "# Plot noise points\n",
    "noise_point = (cluster_lab_copy == -1)\n",
    "ax.scatter(X_m[noise_point, 0], X_m[noise_point, 1], marker=\"o\", c=\"k\", s=8**2, edgecolor=\"k\", zorder=0)\n",
    "\n",
    "# Plot still-unlabeled core points (should be empty when the loop finishes)\n",
    "un_label_id = (cluster_lab_copy == 0)\n",
    "ax.scatter(X_m[un_label_id, 0], X_m[un_label_id, 1], marker=\"o\", c=\"r\", s=8**2, edgecolor=\"k\", zorder=0)\n",
    "\n",
    "# Plot cluster-1 core points\n",
    "cluster1_idx = (cluster_lab_copy == 1)\n",
    "ax.scatter(X_m[cluster1_idx, 0], X_m[cluster1_idx, 1], marker=\"o\", c='tab:blue', s=8**2, edgecolor=\"k\", zorder=2)\n",
    "\n",
    " # Plot the remaining clusters (labels >= 2), colored by label\n",
    "cluster2_idx = (cluster_lab_copy >= 2)\n",
    "ax.scatter(X_m[cluster2_idx, 0], X_m[cluster2_idx, 1], marker=\"o\", c=cluster_lab_copy[cluster2_idx], s=8**2, edgecolor=\"k\", zorder=2)\n",
    "\n",
    "ax.set_title(f'Phase 2: Final, eps = {r}, minPts = {min_pts}', fontsize=label_size)\n",
    "plt.axis('off')\n",
    "plt.show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "f8d567e0-e117-4c65-bc35-9aaaf5f58051",
   "metadata": {},
   "outputs": [],
   "source": [
    "border_point_lab = cluster_lab_copy.copy()\n",
    "\n",
    "# Indices of points still unlabeled after Phase 2 (noise candidates)\n",
    "c_indices = np.where(border_point_lab == -1)[0]\n",
    "\n",
    "border_itr = 0\n",
    "\n",
    "# Phase 3: scan the candidates for border points\n",
    "for c_point in c_indices:\n",
    "\n",
    "    # Find the candidate's neighbors within radius r\n",
    "    neighbor_point = np.where(Euclidean_distance[c_point] <= r)[0] # Find neighbor points\n",
    "    neighbor_point= neighbor_point[neighbor_point != c_point] # Filter out itself\n",
    "    neighbor_point= neighbor_point[border_point_lab[neighbor_point] > 0] # Keep only clustered core points\n",
    "\n",
    "# The candidate is a border point if it has at least one clustered neighbor\n",
    "    if len(neighbor_point) > 0:\n",
    "\n",
    "        # Assign the candidate the cluster label of its closest core point\n",
    "        distance_arr = Euclidean_distance[c_point,neighbor_point]\n",
    "        closest_point = neighbor_point[np.argmin(distance_arr)]\n",
    "        border_point_lab[c_point] = border_point_lab[closest_point]\n",
    "\n",
    "        if border_itr < 3:\n",
    "           # Show the clustering progress\n",
    "            fig, ax = plt.subplots(figsize=(8,8))\n",
    "\n",
    "            # Plot noise points\n",
    "            noise_point = (border_point_lab == -1)\n",
    "            ax.scatter(X_m[noise_point, 0], X_m[noise_point, 1], marker=\"o\", c=\"k\", s=8**2, edgecolor=\"k\", zorder=0)\n",
    "\n",
    "            # Draw the eps-neighborhood of the current border point\n",
    "            circle = plt.Circle((X_m[c_point, 0], X_m[c_point, 1]), r, edgecolor='red', facecolor='tab:red', alpha=0.5, zorder=1)\n",
    "            ax.add_patch(circle)\n",
    "\n",
    "            # Plot cluster-1 points\n",
    "            cluster1_idx = (border_point_lab == 1)\n",
    "            ax.scatter(X_m[cluster1_idx, 0], X_m[cluster1_idx, 1], marker=\"o\", c='tab:blue', s=8**2, edgecolor=\"k\", zorder=2)\n",
    "\n",
    "            # Plot the remaining clusters (labels >= 2), colored by label\n",
    "            cluster2_idx = (border_point_lab >= 2)\n",
    "            ax.scatter(X_m[cluster2_idx, 0], X_m[cluster2_idx, 1], marker=\"o\", c=border_point_lab[cluster2_idx], s=8**2, edgecolor=\"k\", zorder=2)\n",
    "\n",
    "            ax.set_title(f'Phase 3: Itr {border_itr}, eps = {r}, minPts = {min_pts}', fontsize=label_size)\n",
    "            plt.axis('off')\n",
    "            plt.show()\n",
    "\n",
    "            border_itr += 1\n",
    "\n",
    "# Show the final clustering result\n",
    "fig, ax = plt.subplots(figsize=(8,8))\n",
    "\n",
    "# Plot noise points\n",
    "noise_point = (border_point_lab == -1)\n",
    "ax.scatter(X_m[noise_point, 0], X_m[noise_point, 1], marker=\"o\", c=\"k\", s=8**2, edgecolor=\"k\", zorder=0)\n",
    "\n",
    "# Plot cluster-1 points\n",
    "cluster1_idx = (border_point_lab == 1)\n",
    "ax.scatter(X_m[cluster1_idx, 0], X_m[cluster1_idx, 1], marker=\"o\", c='tab:blue', s=8**2, edgecolor=\"k\", zorder=2)\n",
    "\n",
    "# Plot the remaining clusters (labels >= 2), colored by label\n",
    "cluster2_idx = (border_point_lab >= 2)\n",
    "ax.scatter(X_m[cluster2_idx, 0], X_m[cluster2_idx, 1], marker=\"o\", c=border_point_lab[cluster2_idx], s=8**2, edgecolor=\"k\", zorder=2)\n",
    "\n",
    "ax.set_title(f'Phase 3: Final, eps = {r}, minPts = {min_pts}', fontsize=label_size)\n",
    "plt.axis('off')\n",
    "plt.show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "230fca7e-30de-4fab-9ac6-96e22e70bc48",
   "metadata": {},
   "outputs": [],
   "source": [
    "import time\n",
    "from sklearn.cluster import DBSCAN\n",
    "\n",
    "model_eps = 0.05\n",
    "model_minpts = 3\n",
    "\n",
    "# Define the DBSCAN model (NOTE: 'mdldel' appears to be a typo of 'model')\n",
    "dbscan_mdldel = DBSCAN(eps=model_eps, min_samples=model_minpts)\n",
    "\n",
    "# Fit the model and time it\n",
    "StartTime = time.time()\n",
    "dbscan_mdldel.fit(X_m)\n",
    "EndTime = time.time()\n",
    "\n",
    "print(f'Training time: {EndTime - StartTime:.2f} seconds')\n",
    "\n",
    "dbscan_mdldel_labels = dbscan_mdldel.labels_\n",
    "\n",
    "# Show the clustering result\n",
    "fig, ax = plt.subplots(figsize=(8,8))\n",
    "\n",
    "# Plot noise points (label -1)\n",
    "noise_point = (dbscan_mdldel_labels == -1)\n",
    "ax.scatter(X_m[noise_point, 0], X_m[noise_point, 1], marker=\"o\", c=\"k\", s=8**2, edgecolor=\"k\", zorder=0)\n",
    "\n",
    "# Plot the first cluster (sklearn numbers clusters from 0)\n",
    "cluster1_idx = (dbscan_mdldel_labels == 0)\n",
    "ax.scatter(X_m[cluster1_idx, 0], X_m[cluster1_idx, 1], marker=\"o\", c='tab:blue', s=8**2, edgecolor=\"k\", zorder=2)\n",
    "\n",
    "# Plot the remaining clusters (labels >= 1), colored by label\n",
    "cluster2_idx = (dbscan_mdldel_labels >= 1)\n",
    "ax.scatter(X_m[cluster2_idx, 0], X_m[cluster2_idx, 1], marker=\"o\", c=dbscan_mdldel_labels[cluster2_idx], s=8**2, edgecolor=\"k\", zorder=2)\n",
    "\n",
    "ax.set_title(f'Adjust parameter manually: eps = {model_eps}, minPts = {model_minpts}', fontsize=label_size)\n",
    "plt.axis('off')\n",
    "plt.show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "86346d6b-efe8-439f-a815-5c6092617fbf",
   "metadata": {},
   "outputs": [],
   "source": [
    "import pandas as pd\n",
    "import numpy as np\n",
    "\n",
    "# Load the MovieLens movies table\n",
    "# NOTE(review): hardcoded absolute Windows path; prefer a configurable data directory\n",
    "movies = pd.read_csv('D:/BaiduNetdiskDownload/movies.csv')\n",
    "print(f'Movie number: {len(movies)}')\n",
    "movies.head()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "b86f6f1b-63bf-47f9-b7df-beb91088d610",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Collect the sorted set of unique movie genres (genres are pipe-separated)\n",
    "GenresList = []\n",
    "for genres in movies['genres']:\n",
    "    GenresList.extend(genres.split('|'))\n",
    "GenresList = np.sort(np.unique(GenresList))\n",
    "print(f'Movie genres: {GenresList}')\n",
    "\n",
    "label_size = 18 # Label size\n",
    "ticklabel_size = 14 # Tick label size\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "378c4efd-7e94-4aac-9126-85d80e156bca",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Load the MovieLens ratings table\n",
    "# NOTE(review): hardcoded absolute Windows path; prefer a configurable data directory\n",
    "ratings = pd.read_csv('D:/BaiduNetdiskDownload/ratings.csv')\n",
    "userList = np.sort(np.unique(ratings['userId']))\n",
    "print(f'{len(userList)} have provided {len(ratings)} rate records')\n",
    "ratings.head()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "eb4eab44-2e37-4794-96df-3f726a124aaf",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Array of summed ratings per (user, genre)\n",
    "UserGenresRateList = np.zeros((len(userList), len(GenresList)))\n",
    "\n",
    "# Array counting rated movies per (user, genre),\n",
    "# used below to turn the sums into mean ratings\n",
    "UserGenresRate_CountsList = np.zeros((len(userList), len(GenresList)))\n",
    "\n",
    "for i in range(len(ratings)):\n",
    "    # User ID start from 1\n",
    "    # Convert userId to a 0-based row index by subtracting 1\n",
    "    # NOTE(review): assumes userIds are contiguous 1..N — verify for this dataset\n",
    "    user_idx = int(ratings.iloc[i]['userId'] - 1)\n",
    "    \n",
    "    # Get rate\n",
    "    rate = ratings.iloc[i]['rating']\n",
    "\n",
    "    # Get target movie\n",
    "    MovieId = ratings.iloc[i]['movieId']\n",
    "\n",
    "   # Split the pipe-separated genres string\n",
    "    GenresType = movies[movies['movieId'] == MovieId]['genres'].values[0]\n",
    "    GenresType = GenresType.split('|')\n",
    "\n",
    "    # Accumulate the rating into every genre of this movie\n",
    "    for genres in GenresType:\n",
    "        # Use '[0][0]' to pull the index out of the np.where() result:\n",
    "        # the first [0] takes the index array from the tuple,\n",
    "        # the second [0] takes its first (and only) element\n",
    "        genres_idx = np.where(GenresList == genres)[0][0]\n",
    "\n",
    "        # Sum rate\n",
    "        UserGenresRateList[user_idx, genres_idx] += rate\n",
    "        \n",
    "        # Count movies of this genre rated by this user\n",
    "        UserGenresRate_CountsList[user_idx, genres_idx] += 1\n",
    "# Convert sums to means while avoiding division by zero:\n",
    "# np.divide leaves entries at 0 wherever the count is zero\n",
    "UserGenresRateList = np.divide(UserGenresRateList, UserGenresRate_CountsList, out=np.zeros_like(UserGenresRateList), where=UserGenresRate_CountsList!=0)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "e6135d7d-215d-490f-a0ee-d6499dee2de4",
   "metadata": {},
   "outputs": [],
   "source": [
    "import matplotlib.pyplot as plt\n",
    "\n",
    "genres1 = 'Comedy' # first genre of interest\n",
    "genres2 = 'Romance' # second genre of interest\n",
    "\n",
    "g1_index = np.where(GenresList == genres1)[0][0] # index of genres1 in GenresList\n",
    "g2_index = np.where(GenresList == genres2)[0][0] # index of genres2 in GenresList\n",
    "\n",
    "# Users who have rated movies of both genres1 and genres2.\n",
    "# Rationale: users who watched related movies are more reliable judges of\n",
    "# whether they like those genres.\n",
    "both_index = np.where(np.logical_and(UserGenresRateList[:,g1_index] > 0, UserGenresRateList[:,g2_index] > 0))[0]\n",
    "print(f'Number of users who watched both {genres1} and {genres2}: {len(both_index)}')\n",
    "\n",
    "# Average ratings of those users for the two genres\n",
    "x = UserGenresRateList[both_index][:, [g1_index, g2_index]]\n",
    "print(f'Shape of x: {x.shape}')\n",
    "\n",
    "# Scatter plot of the rating distribution\n",
    "fig, ax = plt.subplots(figsize=(7,7))\n",
    "\n",
    "ax.scatter(x[:,0], x[:,1], marker=\"o\", c='pink', s=10**2, edgecolor=\"k\")\n",
    "\n",
    "ax.set_xlabel(f'Average rate of {genres1}', fontsize=label_size)\n",
    "ax.set_ylabel(f'Average rate of {genres2}', fontsize=label_size)\n",
    "ax.set_xlim([-0.1, 5.1])\n",
    "ax.set_ylim([-0.1, 5.1])\n",
    "ax.tick_params(axis='both', which='major', labelsize=ticklabel_size) # Set tick label size (originally called twice; duplicate removed)\n",
    "plt.show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "5cfd75a9-cee2-4c76-ae8f-d3bb36cb19bd",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Expand x to 3-D by adding a new axis (shown only to illustrate the broadcasting step)\n",
    "x_exp = x[:, np.newaxis]\n",
    "print('The shape of x_exp is', x_exp.shape)\n",
    "\n",
    "# Pairwise difference vectors via numpy broadcasting: (n, 1, 2) - (n, 2) -> (n, n, 2)\n",
    "x_arr = x[:, np.newaxis] - x\n",
    "print('The shape of x_arr', x_arr.shape)\n",
    "\n",
    "# Sum absolute differences over the feature axis to get Manhattan distances\n",
    "Manhattan_distance = np.abs(x_arr).sum(axis=2)\n",
    "print('The shape of Manhattan_distance', Manhattan_distance.shape)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "ebd3321e-b70d-4c7d-8653-a0cbabaa960f",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Use the k-Distance plot to find a plausible value for eps\n",
    "min_pts = 3\n",
    "\n",
    "# k-Distance: each point's Manhattan distance to its min_pts-th nearest neighbor,\n",
    "# sorted in descending order (column 0 is the zero self-distance)\n",
    "k_distance = np.sort(np.sort(Manhattan_distance, axis=1)[:, min_pts])[::-1]\n",
    "\n",
    "# Plot the k-Distance graph\n",
    "fig, ax = plt.subplots(figsize=(8,5))\n",
    "ax.plot(np.arange(1, len(k_distance)+1), k_distance, marker='o', linestyle='-', color='tab:blue')\n",
    "ax.set_xlabel('Sample Index', fontsize=label_size)\n",
    "ax.set_ylabel('k-Distance', fontsize=label_size)\n",
    "ax.tick_params(axis='both', which='major', labelsize=ticklabel_size) # Set tick label size\n",
    "plt.show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "6be3a9ff-b438-4c41-bfbf-50a0d5ab02e9",
   "metadata": {},
   "outputs": [],
   "source": [
    "from sklearn.cluster import DBSCAN\n",
    "\n",
    "m_eps = 0.5\n",
    "\n",
    "# Define the DBSCAN clustering model (NOTE: 'mdldel' appears to be a typo of 'model')\n",
    "DBSCAN_mdldel = DBSCAN(eps=m_eps, min_samples=min_pts)\n",
    "\n",
    "# Fit the DBSCAN model on the two-genre rating data\n",
    "DBSCAN_mdldel.fit(x)\n",
    "\n",
    "DBSCAN_labels = DBSCAN_mdldel.labels_\n",
    "\n",
    "# Find the largest cluster among DBSCAN_labels\n",
    "# (noise points, label -1, are filtered out before np.bincount())\n",
    "biggest = np.argmax(np.bincount(DBSCAN_labels[DBSCAN_labels != -1]))\n",
    "print(f'Biggest cluster label: {biggest}')\n",
    "\n",
    "# Scatter plot of the average ratings\n",
    "fig, ax = plt.subplots(figsize=(7,7))\n",
    "\n",
    "# Plot members of the biggest cluster\n",
    "# (NOTE(review): the name 'noise_point' is misleading — this mask selects the biggest cluster, not noise)\n",
    "noise_point = (DBSCAN_labels == biggest)\n",
    "ax.scatter(x[noise_point,0], x[noise_point,1], marker=\"o\", c=\"orange\", s=7**2, edgecolor=\"k\", zorder=0)\n",
    "\n",
    "ax.tick_params(axis='both', which='major', labelsize=ticklabel_size) # Set tick label size\n",
    "ax.set_xlabel(f'Average rate of {genres1}', fontsize=label_size)\n",
    "ax.set_ylabel(f'Average rate of {genres2}', fontsize=label_size)\n",
    "ax.set_xlim([-0.1, 5.1])\n",
    "ax.set_ylim([-0.1, 5.1])\n",
    "ax.set_title(f'DBSCAN clustering {biggest}, eps = {m_eps}', fontsize=label_size)\n",
    "ax.tick_params(axis='both', which='major', labelsize=ticklabel_size) # Set tick label size\n",
    "plt.show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "c3e4bc15-9a43-42fb-a3ab-f4a8b95c6e14",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Users belonging to the biggest cluster.\n",
    "# both_index holds 0-based row indices into UserGenresRateList, which were\n",
    "# built as userId - 1, so add 1 to convert back to real userIds before\n",
    "# matching against ratings['userId'] (the original code compared the 0-based\n",
    "# indices directly to 1-based userIds — an off-by-one selecting the wrong users).\n",
    "UserRatingsList = ratings[ratings['userId'].isin(both_index[DBSCAN_labels == biggest] + 1)]\n",
    "print('Number of users:', len(UserRatingsList['userId'].unique()))\n",
    "\n",
    "# Movies watched by the users in UserRatingsList\n",
    "WatchedList = UserRatingsList['movieId'].unique()\n",
    "print('Number of watched movies:', len(WatchedList))\n",
    "\n",
    "# Keep only watched movies that contain genres1 or genres2\n",
    "FilteredList = movies[movies['movieId'].isin(WatchedList) & \n",
    "                         (movies['genres'].str.contains(genres1) | \n",
    "                          movies['genres'].str.contains(genres2))]\n",
    "print('Number of filtered movies:', len(FilteredList))\n",
    "\n",
    "# Keep only the first 450 movies to reduce computation time\n",
    "FilteredList = FilteredList.head(450)\n",
    "print('Number of selected movies:', len(FilteredList))\n",
    "\n",
    "# Join the ratings and filtered-movies tables on 'movieId'\n",
    "UsermovieRate_table = pd.merge(UserRatingsList, FilteredList, on='movieId')\n",
    "UsermovieRate_table.head()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "ad532b71-8524-4d8c-a55e-86a8b52093b3",
   "metadata": {},
   "outputs": [],
   "source": [
    "# List of userIds present in the merged table\n",
    "UserList = UsermovieRate_table['userId'].unique()\n",
    "print(f'User number is {len(UserList)}')\n",
    "\n",
    "# List of movieIds present in the merged table\n",
    "MovieList = UsermovieRate_table['movieId'].unique()\n",
    "print(f'Movie number is {len(MovieList)}')\n",
    "\n",
    "# Build UsermovieRate_array:\n",
    "# a 2-D array holding each user's rating for each movie (0 = unrated)\n",
    "UsermovieRate_array = np.zeros((len(UserList), len(MovieList)))\n",
    "\n",
    "# Fill the array from the ratings rows\n",
    "for index, row in UsermovieRate_table.iterrows():\n",
    "    UserIndex = np.where(UserList == row['userId'])[0][0]\n",
    "    MovieIndex = np.where(MovieList == row['movieId'])[0][0]\n",
    "    \n",
    "    UsermovieRate_array[UserIndex, MovieIndex] = row['rating']\n",
    "\n",
    "print(f\"Shape of UsermovieRate_array: {UsermovieRate_array.shape}\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "b7ad8372-768a-449b-8550-1ed8d3472af4",
   "metadata": {},
   "outputs": [],
   "source": [
    "import matplotlib.pyplot as plt\n",
    "import seaborn as sns\n",
    "\n",
    "# Set up the figure\n",
    "plt.figure(figsize=(10, 5))\n",
    "\n",
    "# Heatmap of the raw user-movie rating matrix (zeros are unrated entries)\n",
    "sns.heatmap(UsermovieRate_array, cmap='coolwarm', xticklabels=False, yticklabels=False)\n",
    "\n",
    "# Axis labels\n",
    "plt.xlabel('Movies')\n",
    "plt.ylabel('Users')\n",
    "plt.show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "20607d1f-5d4b-412d-b3b9-aec587b4c99a",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Dictionary mapping each user's row index to the column indices of movies\n",
    "# that user has not rated (a rating of 0 in UsermovieRate_array means unrated)\n",
    "UnratedMovies = {}\n",
    "UMcount = 0\n",
    "print(UsermovieRate_array.shape)\n",
    "# Iterate over users\n",
    "for user_idx, user_ratings in enumerate(UsermovieRate_array):\n",
    "    # Indices of unrated movies (rating stored as 0; use < 0.1 to be float-safe)\n",
    "    UnratedIndices = np.where(user_ratings < 0.1)[0]\n",
    "    \n",
    "    # Store this user's unrated movie indices\n",
    "    # (keys and values are indices into UsermovieRate_array)\n",
    "    UnratedMovies[user_idx] = UnratedIndices\n",
    "    \n",
    "    # np.where(...)[0] always returns an ndarray, so the original\n",
    "    # isinstance(UnratedIndices, int) branch was dead code; just add the size.\n",
    "    UMcount += UnratedIndices.size\n",
    "\n",
    "print(f\"Number of users with unrated user-movies: {UMcount}\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "dfd12a99-5716-4b8f-8e3b-b21d24a51782",
   "metadata": {},
   "outputs": [],
   "source": [
    "from sklearn.metrics.pairwise import cosine_similarity\n",
    "\n",
    "# User-user cosine similarity matrix computed from the rating row vectors\n",
    "SimMatrix = cosine_similarity(UsermovieRate_array)\n",
    "print(f\"Size of sim_matrix is: {SimMatrix.shape}\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "366e34f0-f54f-4ca2-b689-512d2f24aa00",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Predict ratings for unrated movies via user-based collaborative filtering\n",
    "def predict_ratings(user_idx, UnratedIndices, SimMatrix, UsermovieRate_array, k=10):\n",
    "    \"\"\"Predict ratings for one user's unrated movies.\n",
    "\n",
    "    Uses the similarity-weighted average of ratings from the k most similar\n",
    "    users; falls back to the global mean rating when none of them rated the\n",
    "    movie. Returns a list of predictions aligned with UnratedIndices.\n",
    "    \"\"\"\n",
    "    # Similarity scores between the current user and all users\n",
    "    UserSimilarities = SimMatrix[user_idx]\n",
    "    \n",
    "    # Sort similarities in descending order and take the top-k similar users\n",
    "    # ([1:k+1] skips the first entry, normally the user themself)\n",
    "    SimUsers = np.argsort(UserSimilarities)[::-1][1:k+1]\n",
    "    \n",
    "    predicted_ratings = []\n",
    "    \n",
    "    for movie_idx in UnratedIndices:\n",
    "        # Ratings given to this movie by the similar users\n",
    "        SimUser_ratings = UsermovieRate_array[SimUsers, movie_idx]\n",
    "        \n",
    "        # Keep only the similar users who actually rated this movie (> 0)\n",
    "        ValidRatings = SimUser_ratings[SimUser_ratings > 0]\n",
    "        ValidSim = UserSimilarities[SimUsers][SimUser_ratings > 0]\n",
    "        \n",
    "        if len(ValidRatings) > 0:\n",
    "            # Similarity-weighted average of the ratings\n",
    "            predicted_rating = np.sum(ValidRatings * ValidSim) / np.sum(ValidSim)\n",
    "        else:\n",
    "            # No similar user rated this movie: fall back to the global mean rating\n",
    "            predicted_rating = np.mean(UsermovieRate_array[UsermovieRate_array > 0])\n",
    "        \n",
    "        predicted_ratings.append(predicted_rating)\n",
    "    \n",
    "    return predicted_ratings\n",
    "\n",
    "# Predict ratings for every user\n",
    "predicted_ratings = {}\n",
    "for user_idx, UnratedIndices in UnratedMovies.items():\n",
    "    predicted_ratings[user_idx] = predict_ratings(user_idx, UnratedIndices, SimMatrix, UsermovieRate_array)\n",
    "\n",
    "# Print some statistics\n",
    "Total = sum(len(ratings) for ratings in predicted_ratings.values())\n",
    "print(f\"Total number of predictions made: {Total}\")\n",
    "print(f\"Average predicted rating: {np.mean([rating for user_ratings in predicted_ratings.values() for rating in user_ratings]):.2f}\")\n",
    "\n",
    "# Fill the predicted ratings into UsermovieRate_array\n",
    "# NOTE(review): this mutates UsermovieRate_array in place, so re-running this\n",
    "# cell (or earlier ones) without rebuilding the array sees predicted, not raw, values\n",
    "for user_idx, user_predictions in predicted_ratings.items():\n",
    "    UsermovieRate_array[user_idx, UnratedMovies[user_idx]] = user_predictions\n",
    "\n",
    "print(\"Updated UsermovieRate_array with predicted ratings.\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "e834d257-649a-4e6d-9401-d8f29ddf6ef2",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Set up the figure\n",
    "plt.figure(figsize=(10, 5))\n",
    "\n",
    "# Heatmap of the rating matrix after filling in the predicted ratings\n",
    "sns.heatmap(UsermovieRate_array, cmap='coolwarm', xticklabels=False, yticklabels=False)\n",
    "\n",
    "# Axis labels\n",
    "plt.xlabel('Movies')\n",
    "plt.ylabel('Users')\n",
    "plt.show()"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.12.3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
