import pandas as pd
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.cluster import KMeans
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.metrics import silhouette_score
import warnings
import os
import platform
import multiprocessing as mp
from joblib import Parallel, delayed
import time

# Force the 'spawn' multiprocessing start method on macOS: the default 'fork'
# can deadlock or crash with libraries that manage their own threads
# (e.g. the joblib/sklearn workers used later in this file).
if __name__ == '__main__':
    try:
        if platform.system() == 'Darwin':  # macOS
            mp.set_start_method('spawn')
    except RuntimeError:
        pass  # the start method was already set elsewhere; ignore

# Silence library warnings to keep console output readable
warnings.filterwarnings('ignore')


# Chinese (CJK) font support for matplotlib
def setup_fonts():
    """Configure matplotlib so CJK labels and titles render correctly.

    Fix: the original code assigned the identical font list in all three
    platform branches, which made the platform dispatch dead code, and
    'SimHei' generally ships only with Windows — macOS/Linux users saw
    missing-glyph boxes. Each platform now gets fonts it actually ships.
    """
    system = platform.system()

    if system == 'Darwin':  # macOS bundles Heiti/PingFang, not SimHei
        plt.rcParams['font.family'] = ['Heiti TC', 'PingFang SC', 'Arial Unicode MS']
    elif system == 'Windows':  # SimHei / Microsoft YaHei ship with Windows
        plt.rcParams['font.family'] = ['SimHei', 'Microsoft YaHei']
    else:  # Linux and others: WenQuanYi / Noto are the common free CJK fonts
        plt.rcParams['font.family'] = ['WenQuanYi Micro Hei', 'Noto Sans CJK SC']

    # Render the minus sign correctly when a CJK font is active
    plt.rcParams['axes.unicode_minus'] = False
    plt.rcParams['figure.figsize'] = (18, 12)
    plt.rcParams['xtick.labelsize'] = 10
    plt.rcParams['ytick.labelsize'] = 10


# Chunked-reading helper for large CSV files
def read_csv_in_chunks(file_path, chunksize=100000):
    """Read a (possibly large) CSV file in chunks into one DataFrame.

    Progress is printed every 10 chunks.

    Parameters
    ----------
    file_path : str
        Path of the CSV file.
    chunksize : int, optional
        Number of rows per chunk (default 100,000).

    Returns
    -------
    pandas.DataFrame
        All chunks concatenated with a fresh RangeIndex. Fix: a
        header-only file yields zero chunks, and ``pd.concat([])``
        raises ValueError — fall back to a plain ``read_csv`` so an
        empty (but well-formed) file returns an empty DataFrame.
    """
    start_time = time.time()
    chunks = []
    print(f"正在分块读取文件: {file_path}")

    for i, chunk in enumerate(pd.read_csv(file_path, chunksize=chunksize)):
        chunks.append(chunk)
        if (i + 1) % 10 == 0:
            print(f"已读取 {i + 1} 个数据块")

    # Guard the zero-chunk case: pd.concat refuses an empty list.
    df = pd.concat(chunks, ignore_index=True) if chunks else pd.read_csv(file_path)
    print(f"文件读取完成，耗时: {time.time() - start_time:.2f} 秒")
    return df


# ----------------------
# Data preprocessing
# ----------------------
def load_and_preprocess_data(file_path):
    """Load the movie-rating dataset and clean it.

    Pipeline: read the file (chunked CSV or Excel) → impute or drop
    missing values → drop duplicate rows → clip extreme numeric
    outliers (3×IQR fences). On any failure a deterministic 1000-row
    synthetic demo dataset is returned so the rest of the pipeline can
    still run.

    Fixes vs. the original:
    * missing values are always counted with the vectorized
      ``df.isnull().sum()`` — the previous joblib ``Parallel``
      per-column path was slower (it pickled the whole DataFrame into
      every worker) and added nothing;
    * numeric columns are detected with
      ``pd.api.types.is_numeric_dtype``, which also covers
      int32/float32/nullable dtypes that the old
      ``dtype in [np.float64, np.int64]`` check silently missed.

    Parameters
    ----------
    file_path : str
        Path to a ``.csv`` file (read in chunks) or an Excel file.

    Returns
    -------
    pandas.DataFrame
        The cleaned dataset, or the synthetic demo dataset on error.
    """
    try:
        print(f"开始加载数据: {file_path}")
        start_time = time.time()

        if file_path.endswith('.csv'):
            df = read_csv_in_chunks(file_path)
        else:
            df = pd.read_excel(file_path)

        print(f"数据加载完成，耗时: {time.time() - start_time:.2f} 秒")
        print(f"数据基本信息：")
        df.info()

        # Vectorized C-level count; fast enough for any size.
        missing_values = df.isnull().sum()

        # Handle missing values
        if missing_values.sum() > 0:
            print("\n缺失值情况：")
            print(missing_values[missing_values > 0])

            # Per column: drop very sparse columns, otherwise impute.
            # Iterate over a snapshot since `df` is reassigned on drop.
            for col in list(df.columns):
                missing_ratio = missing_values[col] / len(df)
                if missing_ratio > 0.3:
                    print(f"列 {col} 缺失比例过高 ({missing_ratio:.2%})，将被删除")
                    df = df.drop(col, axis=1)
                elif missing_ratio > 0:
                    if pd.api.types.is_numeric_dtype(df[col]):
                        print(f"列 {col} 为数值类型，使用中位数填充缺失值")
                        df[col] = df[col].fillna(df[col].median())
                    else:
                        print(f"列 {col} 为非数值类型，使用众数填充缺失值")
                        df[col] = df[col].fillna(df[col].mode()[0])

            print("\n缺失值处理完成")
        else:
            print("\n数据集中没有缺失值")

        # Remove exact duplicate rows
        duplicates = df.duplicated().sum()
        if duplicates > 0:
            print(f"\n发现 {duplicates} 条重复记录，正在删除...")
            df = df.drop_duplicates()
            print(f"重复记录已删除")

        # Clip extreme outliers; 3*IQR fences are wider than the usual
        # 1.5*IQR, so only far-out values are affected.
        numeric_cols = df.select_dtypes(include=[np.number]).columns.tolist()
        if numeric_cols:
            print("\n检查数值列中的异常值:")
            for col in numeric_cols:
                q1 = df[col].quantile(0.25)
                q3 = df[col].quantile(0.75)
                iqr = q3 - q1
                lower_bound = q1 - 3 * iqr
                upper_bound = q3 + 3 * iqr

                outliers = df[(df[col] < lower_bound) | (df[col] > upper_bound)][col]
                if not outliers.empty:
                    print(f"列 {col} 发现 {len(outliers)} 个异常值")
                    # Clip instead of dropping, to keep the row's other data
                    df[col] = df[col].clip(lower_bound, upper_bound)

            print("异常值处理完成")

        print(f"\n数据预处理完成，最终数据集形状: {df.shape}")
        return df

    except Exception as e:
        print(f"加载数据时出错: {e}")
        print("创建示例数据用于演示...")

        # Deterministic synthetic fallback (seeded) for demo purposes
        np.random.seed(42)
        data = {
            'ID': range(1, 1001),
            'Movie_Name_EN': np.random.choice(['Avengers: Age of Ultron', 'Avatar', 'Titanic',
                                               'The Dark Knight', 'Inception', 'Pride and Prejudice',
                                               'The Lord of the Rings', 'Star Wars', 'Jurassic Park',
                                               'Forrest Gump'], 1000),
            'Movie_Name_CN': np.random.choice(['复仇者联盟2', '阿凡达', '泰坦尼克号',
                                               '黑暗骑士', '盗梦空间', '傲慢与偏见',
                                               '指环王', '星球大战', '侏罗纪公园',
                                               '阿甘正传'], 1000),
            'Username': [f'User{i}' for i in range(1, 201)] * 5,  # 200 users, 5 rows each
            'Star': np.random.randint(1, 6, 1000),  # ratings in 1-5
            'Comment': ['很棒！', '不错', '一般', '很差', '糟糕'] * 200,
            'Like': np.random.randint(0, 200, 1000)
        }

        return pd.DataFrame(data)


# ----------------------
# Build the user-movie rating matrix
# ----------------------
def build_user_movie_matrix(df):
    """Pivot the ratings into a user x movie matrix (0 = unrated).

    Rows are usernames, columns are Chinese movie titles, and each cell
    holds the (mean) star rating; user/movie pairs with no rating become
    0. Matrices with more than 50,000 users are randomly downsampled to
    keep downstream clustering tractable.
    """
    print("\n正在构建用户-电影评分矩阵...")
    t0 = time.time()

    # pivot_table averages duplicate (user, movie) ratings by default
    matrix = df.pivot_table(
        values='Star',
        index='Username',
        columns='Movie_Name_CN',
        fill_value=0
    )

    print(f"用户-电影评分矩阵构建完成，耗时: {time.time() - t0:.2f} 秒")
    print(f"矩阵包含 {matrix.shape[0]} 个用户，{matrix.shape[1]} 部电影")

    # Sparsity = share of zero (unrated) cells
    filled_cells = np.count_nonzero(matrix)
    sparsity = 1 - filled_cells / matrix.size
    print(f"矩阵稀疏性: {sparsity:.2%}")

    # Cap very large matrices at 50,000 users via seeded random sampling
    if matrix.shape[0] > 50000:
        print("检测到超大型矩阵，进行随机下采样以提高性能")
        np.random.seed(42)
        sample_size = min(50000, matrix.shape[0])
        kept_users = np.random.choice(matrix.index, sample_size, replace=False)
        matrix = matrix.loc[kept_users]
        print(f"下采样后矩阵包含 {matrix.shape[0]} 个用户")

    return matrix


# ----------------------
# Find the optimal number of clusters
# ----------------------
def find_optimal_clusters(data, max_k=10, plot=True, sample_size=5000):
    """Choose a cluster count via the elbow method and silhouette scores.

    KMeans is fitted on the full ``data`` for every k in [2, max_k];
    silhouette scores (O(n^2)) are computed on a fixed random sample of
    at most ``sample_size`` points. When the two criteria disagree, the
    rounded mean of the two candidate k values is returned.

    Fix: the original elbow computation took ``np.argmax`` over the
    *negative* inertia diff ratios, which selects the flattest segment
    (smallest relative drop) — the opposite of an elbow — and mapped the
    diff index back to k without the +1 offset. The elbow is now the k
    immediately after the largest relative drop in inertia.

    Parameters
    ----------
    data : ndarray
        Scaled feature matrix (rows = users).
    max_k : int, optional
        Largest k to evaluate (default 10).
    plot : bool, optional
        When True, save the diagnostic figure to 'optimal_clusters.png'.
    sample_size : int, optional
        Max sample size for silhouette scoring (default 5000).

    Returns
    -------
    int
        The chosen number of clusters.
    """
    print("\n正在寻找最佳聚类数...")
    start_time = time.time()

    # Subsample once, up front, for silhouette scoring only.
    sampled = len(data) > sample_size
    if sampled:
        print(f"数据集较大 ({len(data)} 样本)，将采样 {sample_size} 个样本计算轮廓系数")
        np.random.seed(42)
        indices = np.random.choice(len(data), sample_size, replace=False)
        sample_data = data[indices]
    else:
        sample_data = data

    inertia = []
    silhouette_scores = []
    k_range = range(2, max_k + 1)

    for k in k_range:
        print(f"正在评估 k={k}...")
        kmeans = KMeans(n_clusters=k, random_state=42, n_init=10)

        # Fit on the full data; score silhouette only on the sample.
        labels = kmeans.fit_predict(data)
        inertia.append(kmeans.inertia_)

        if k < len(sample_data):  # silhouette requires k < number of samples
            sample_labels = labels[indices] if sampled else labels
            score = silhouette_score(sample_data, sample_labels, n_jobs=1)  # keep serial
            silhouette_scores.append(score)
            print(f"k={k} 的轮廓系数: {score:.4f}")
        else:
            silhouette_scores.append(0)

    # Elbow: k right after the largest relative drop in inertia.
    relative_drops = -np.diff(inertia) / np.asarray(inertia[:-1])
    best_k_elbow = k_range[int(np.argmax(relative_drops)) + 1]

    # Best k by silhouette score
    best_k_silhouette = k_range[np.argmax(silhouette_scores)]

    # Combine the two criteria
    if best_k_elbow == best_k_silhouette:
        best_k = best_k_elbow
    else:
        # When they disagree, take the (truncated) mean of the two
        best_k = int((best_k_elbow + best_k_silhouette) / 2)

    print(f"最佳聚类数 (肘部法则): {best_k_elbow}")
    print(f"最佳聚类数 (轮廓系数): {best_k_silhouette}")
    print(f"最终选择的最佳聚类数: {best_k}")
    print(f"寻找最佳聚类数耗时: {time.time() - start_time:.2f} 秒")

    # Diagnostic plots
    if plot:
        plt.figure(figsize=(18, 6))

        # Elbow plot
        plt.subplot(1, 2, 1)
        plt.plot(k_range, inertia, 'o-', color='blue')
        plt.xlabel('聚类数 (k)')
        plt.ylabel('惯性 (Inertia)')
        plt.title('肘部法则确定最佳聚类数')
        plt.xticks(k_range)

        # Mark the elbow position
        plt.axvline(x=best_k_elbow, color='red', linestyle='--', label=f'肘部位置 (k={best_k_elbow})')
        plt.legend()

        # Silhouette plot
        plt.subplot(1, 2, 2)
        plt.plot(k_range, silhouette_scores, 'o-', color='green')
        plt.xlabel('聚类数 (k)')
        plt.ylabel('轮廓系数')
        plt.title('轮廓系数确定最佳聚类数')
        plt.xticks(k_range)

        # Mark the best silhouette position
        plt.axvline(x=best_k_silhouette, color='red', linestyle='--', label=f'最佳位置 (k={best_k_silhouette})')
        plt.legend()

        plt.tight_layout()
        plt.savefig('optimal_clusters.png', dpi=300, bbox_inches='tight')
        plt.close()

    return best_k


# ----------------------
# K-Means clustering
# ----------------------
def perform_kmeans(data, n_clusters):
    """Fit K-Means with a fixed seed and report cluster statistics.

    Prints the per-cluster user counts, the model inertia, and the mean
    silhouette score (computed on a 10,000-point sample for large
    inputs). Returns ``(labels, fitted_model)``.
    """
    print(f"\n正在使用 k={n_clusters} 进行聚类...")
    t0 = time.time()

    model = KMeans(n_clusters=n_clusters, random_state=42, n_init=10)
    labels = model.fit_predict(data)

    # Per-cluster user counts
    counts = pd.Series(labels).value_counts().sort_index()
    print("\n聚类分布:")
    for cluster_id, count in counts.items():
        print(f"聚类 {cluster_id}: {count} 个用户 ({count / len(labels):.2%})")

    total_inertia = model.inertia_

    # Silhouette is O(n^2): sample large datasets before scoring
    n_samples = len(data)
    if n_samples > 10000:
        print(f"数据集较大 ({n_samples} 样本)，将采样 10000 个样本计算轮廓系数")
        np.random.seed(42)
        picked = np.random.choice(n_samples, 10000, replace=False)
        silhouette_avg = silhouette_score(data[picked], labels[picked], n_jobs=1)  # keep serial
    else:
        silhouette_avg = silhouette_score(data, labels, n_jobs=1)  # keep serial

    print(f"聚类惯性 (Inertia): {total_inertia:.2f}")
    print(f"平均轮廓系数: {silhouette_avg:.4f}")
    print(f"聚类耗时: {time.time() - t0:.2f} 秒")

    return labels, model


# ----------------------
# Visualize clustering results
# ----------------------
def visualize_clusters(data, clusters, kmeans, user_movie_matrix):
    """Render a 4-panel summary of the clustering and save it to disk.

    Panels: (1) 2-D PCA scatter of (a sample of) the users coloured by
    cluster, with the fitted cluster centres projected onto the same
    plane; (2) bar chart of cluster sizes; (3) heatmap of each cluster's
    mean rating for the 10 movies with the highest overall mean rating;
    (4) per-cluster mean-rating curves across all movies.

    NOTE(review): this function adds a 'Cluster' column to the
    `user_movie_matrix` argument in place — callers should pass a copy
    (main() does).

    Parameters
    ----------
    data : ndarray
        Scaled feature matrix used for clustering; rows align
        positionally with `user_movie_matrix` rows and with `clusters`.
    clusters : ndarray
        Cluster label per user.
    kmeans : KMeans
        Fitted model providing `cluster_centers_`.
    user_movie_matrix : DataFrame
        User x movie rating matrix (0 = unrated).

    Returns
    -------
    DataFrame
        Mean ratings per cluster for the top-10 movies (heatmap data).
    """
    print("\n正在可视化聚类结果...")
    start_time = time.time()

    plt.figure(figsize=(20, 16))
    cluster_counts = pd.Series(clusters).value_counts().sort_index()

    # 1. PCA scatter (2-D projection)
    plt.subplot(2, 2, 1)
    pca = PCA(n_components=2)

    # Subsample large datasets so the scatter stays legible and fast
    if len(data) > 5000:
        print(f"数据集较大 ({len(data)} 样本)，将采样 5000 个样本进行可视化")
        np.random.seed(42)
        indices = np.random.choice(len(data), 5000, replace=False)
        sample_data = data[indices]
        sample_clusters = clusters[indices]
    else:
        sample_data = data
        sample_clusters = clusters

    reduced_data = pca.fit_transform(sample_data)

    scatter = plt.scatter(
        reduced_data[:, 0],
        reduced_data[:, 1],
        c=sample_clusters,
        cmap='viridis',
        edgecolor='k',
        alpha=0.8,
        s=50
    )

    # Project the cluster centres onto the same PCA plane
    centers = kmeans.cluster_centers_
    centers_reduced = pca.transform(centers)
    plt.scatter(
        centers_reduced[:, 0],
        centers_reduced[:, 1],
        c='red',
        marker='X',
        s=300,
        label='聚类中心'
    )

    plt.xlabel('主成分1')
    plt.ylabel('主成分2')
    plt.title(f'K-Means聚类结果 (k={len(np.unique(clusters))})')
    plt.legend()
    plt.colorbar(scatter, label='聚类标签')

    # 2. Cluster-size bar chart
    plt.subplot(2, 2, 2)
    bars = plt.bar(cluster_counts.index, cluster_counts.values, color='skyblue')
    plt.xlabel('聚类标签')
    plt.ylabel('用户数量')
    plt.title('各聚类的用户分布')
    plt.xticks(cluster_counts.index)

    # Annotate each bar with its count
    for bar in bars:
        height = bar.get_height()
        plt.text(bar.get_x() + bar.get_width() / 2., height + 0.5,
                 f'{height}', ha='center', va='bottom')

    # 3. Per-cluster average-rating heatmap (mutates the argument in place)
    plt.subplot(2, 2, 3)
    user_movie_matrix['Cluster'] = clusters
    cluster_avg_ratings = user_movie_matrix.groupby('Cluster').mean()

    # Keep only the 10 movies with the highest overall mean rating
    top_movies = cluster_avg_ratings.mean().sort_values(ascending=False).head(10).index
    cluster_avg_ratings = cluster_avg_ratings[top_movies]

    # Draw the heatmap
    cmap = sns.diverging_palette(220, 10, as_cmap=True)
    sns.heatmap(cluster_avg_ratings, annot=True, cmap=cmap, fmt='.2f', linewidths=.5)
    plt.title('各聚类对热门电影的平均评分')

    # 4. Per-cluster rating-distribution curves
    plt.subplot(2, 2, 4)
    for cluster_id in np.unique(clusters):
        cluster_ratings = user_movie_matrix[user_movie_matrix['Cluster'] == cluster_id].drop('Cluster', axis=1)
        avg_ratings = cluster_ratings.mean(axis=0)
        plt.plot(avg_ratings, label=f'聚类 {cluster_id}', linewidth=2)

    plt.xlabel('电影')
    plt.ylabel('平均评分')
    plt.title('各聚类的评分分布')
    plt.xticks(rotation=90)
    plt.legend()

    plt.tight_layout()
    plt.savefig('clustering_results.png', dpi=300, bbox_inches='tight')
    plt.close()

    # Side effect: also writes 'cluster_quality_report.txt'
    generate_cluster_quality_report(data, clusters)

    print(f"可视化完成，耗时: {time.time() - start_time:.2f} 秒")
    return cluster_avg_ratings


# ----------------------
# Clustering quality report
# ----------------------
def generate_cluster_quality_report(data, clusters):
    """Compute clustering quality metrics and write a text report.

    Reports the Calinski-Harabasz index, Davies-Bouldin index, and mean
    silhouette score (each computed on a 10,000-point sample for large
    inputs), plus the per-cluster user counts. Results are printed and
    saved to 'cluster_quality_report.txt'.

    Fix: the report file is now opened with ``encoding='utf-8'`` — it
    contains Chinese text, and the platform default encoding (e.g.
    cp1252 on Western-locale Windows) cannot represent it, which raised
    UnicodeEncodeError on write.
    """
    from sklearn.metrics import calinski_harabasz_score, davies_bouldin_score

    print("\n生成聚类质量评估报告:")

    # Sample large datasets before computing the O(n^2) metrics
    if len(data) > 10000:
        print(f"数据集较大 ({len(data)} 样本)，将采样 10000 个样本计算评估指标")
        np.random.seed(42)
        indices = np.random.choice(len(data), 10000, replace=False)
        sample_data = data[indices]
        sample_clusters = clusters[indices]
    else:
        sample_data = data
        sample_clusters = clusters

    # Quality metrics
    ch_score = calinski_harabasz_score(sample_data, sample_clusters)
    db_score = davies_bouldin_score(sample_data, sample_clusters)
    silhouette_avg = silhouette_score(sample_data, sample_clusters, n_jobs=1)  # keep serial

    print(f"Calinski-Harabasz 指数: {ch_score:.2f}")
    print(f"Davies-Bouldin 指数: {db_score:.2f}")
    print(f"轮廓系数: {silhouette_avg:.4f}")

    # Cluster size distribution
    cluster_counts = pd.Series(clusters).value_counts().sort_index()
    print("\n聚类分布:")
    for cluster_id, count in cluster_counts.items():
        print(f"聚类 {cluster_id}: {count} 个用户 ({count / len(clusters):.2%})")

    # Persist the report; UTF-8 is required for the Chinese text below.
    with open('cluster_quality_report.txt', 'w', encoding='utf-8') as f:
        f.write("聚类质量评估报告\n")
        f.write("=" * 30 + "\n")
        f.write(f"Calinski-Harabasz 指数: {ch_score:.2f}\n")
        f.write(f"Davies-Bouldin 指数: {db_score:.2f}\n")
        f.write(f"轮廓系数: {silhouette_avg:.4f}\n\n")
        f.write("聚类分布:\n")
        for cluster_id, count in cluster_counts.items():
            f.write(f"聚类 {cluster_id}: {count} 个用户 ({count / len(clusters):.2%})\n")

# ----------------------
# User-similarity based recommendations
# ----------------------
def recommend_movies(user_id, user_movie_matrix, clusters, n_recommendations=5, similarity_threshold=0.5):
    """Recommend unseen movies to ``user_id`` via cluster-mates' ratings.

    Candidates are users in the same K-Means cluster (sampled down to
    1000 for very large clusters) whose cosine similarity to the target
    exceeds ``similarity_threshold`` (the threshold is halved once if no
    one qualifies). Each movie the target has not rated is scored as the
    similarity-weighted average of the top-10 similar users' ratings.

    Bug fix: rating weights are now selected with the same boolean mask
    as the ratings themselves. The previous code used
    ``top_similarity_scores[:len(valid_ratings)]``, which paired the
    surviving ratings with the *first N* similarity scores positionally,
    so most weights belonged to the wrong users.

    Returns
    -------
    list[tuple]
        Up to ``n_recommendations`` ``(movie, predicted_score)`` pairs,
        best first; empty list when no recommendation can be made.
    """
    if user_id not in user_movie_matrix.index:
        print(f"用户 {user_id} 不存在！")
        return []

    print(f"\n正在为用户 {user_id} 生成推荐...")
    start_time = time.time()

    # Cluster of the target user (clusters aligns positionally with matrix rows)
    user_cluster = clusters[user_movie_matrix.index.get_loc(user_id)]

    # Other users in the same cluster
    similar_users = user_movie_matrix.index[clusters == user_cluster].tolist()
    similar_users.remove(user_id)  # exclude the target itself

    if not similar_users:
        print(f"用户 {user_id} 所在的聚类中没有其他用户！")
        return []

    # Target user's rating vector, shaped (1, n_movies) for cosine_similarity
    user_ratings = user_movie_matrix.loc[user_id].values.reshape(1, -1)

    # Cap the candidate pool to keep the similarity computation cheap
    if len(similar_users) > 1000:
        print(f"相似用户数量较多 ({len(similar_users)})，将采样 1000 个用户计算相似度")
        np.random.seed(42)
        similar_users = np.random.choice(similar_users, 1000, replace=False).tolist()

    similar_users_ratings = user_movie_matrix.loc[similar_users].values

    # Cosine similarity of the target against every candidate
    similarity_scores = cosine_similarity(user_ratings, similar_users_ratings).flatten()

    # Keep only candidates above the similarity threshold
    valid_indices = np.where(similarity_scores > similarity_threshold)[0]

    if len(valid_indices) == 0:
        print(f"没有找到足够相似的用户（相似度阈值: {similarity_threshold}）")
        # Retry once with half the threshold before giving up
        valid_indices = np.where(similarity_scores > similarity_threshold / 2)[0]
        if len(valid_indices) == 0:
            print("即使降低阈值也没有找到足够相似的用户")
            return []
        else:
            print(f"降低阈值后找到 {len(valid_indices)} 个相似用户")

    filtered_similar_users = [similar_users[i] for i in valid_indices]
    filtered_similarity_scores = similarity_scores[valid_indices]

    # Keep the (up to) 10 most similar users, most similar first
    top_similar_users_indices = filtered_similarity_scores.argsort()[::-1][:min(10, len(filtered_similar_users))]
    top_similar_users = [filtered_similar_users[i] for i in top_similar_users_indices]
    top_similarity_scores = filtered_similarity_scores[top_similar_users_indices]

    print(f"找到 {len(top_similar_users)} 个相似用户")

    # Movies the target has not rated (0 = unrated)
    user_ratings_series = user_movie_matrix.loc[user_id]
    unrated_movies = user_ratings_series[user_ratings_series == 0].index.tolist()

    if not unrated_movies:
        print(f"用户 {user_id} 已对所有电影评分！")
        return []

    # Score each unrated movie from the similar users' ratings
    recommendations = {}
    for movie in unrated_movies:
        # Ratings of the top similar users for this movie, in top order
        similar_ratings = user_movie_matrix.loc[top_similar_users, movie].to_numpy()

        # Mask of similar users who actually rated this movie
        rated_mask = similar_ratings > 0

        if rated_mask.any():
            # Weight each rating by THAT user's similarity score
            recommendations[movie] = np.average(
                similar_ratings[rated_mask],
                weights=top_similarity_scores[rated_mask],
            )

    if not recommendations:
        print(f"无法为用户 {user_id} 生成推荐！")
        return []

    sorted_recommendations = sorted(recommendations.items(), key=lambda x: x[1], reverse=True)
    print(f"为用户 {user_id} 生成推荐耗时: {time.time() - start_time:.2f} 秒")

    return sorted_recommendations[:n_recommendations]


# ----------------------
# Evaluate the recommender
# ----------------------
def evaluate_recommendation_system(user_movie_matrix, clusters, test_size=0.2, n_recommendations=5):
    """Estimate precision / recall / F1 of the recommender.

    A user's "liked" movies are those rated >= 4; a recommendation
    counts as a hit when it appears in that set. Precision is
    hits / n_recommendations (precision@k) and recall is hits / |liked|.
    At most 1000 users are evaluated; smaller datasets use a random
    ``test_size`` fraction. Returns a dict with keys 'precision',
    'recall', and 'f1_score'.
    """
    print("\n正在评估推荐系统性能...")
    t_start = time.time()

    # Seed once; both branches below draw from the same seeded state.
    np.random.seed(42)
    if len(user_movie_matrix) > 1000:
        print(f"数据集较大 ({len(user_movie_matrix)} 用户)，将采样 1000 个用户进行评估")
        test_users = np.random.choice(user_movie_matrix.index, 1000, replace=False)
    else:
        test_users = np.random.choice(user_movie_matrix.index,
                                      size=int(len(user_movie_matrix.index) * test_size),
                                      replace=False)

    print(f"使用 {len(test_users)} 个用户进行测试")

    precision_scores = []
    recall_scores = []

    for uid in test_users:
        # Movies this user actually liked (rating >= 4)
        ratings_row = user_movie_matrix.loc[uid]
        liked = ratings_row.index[ratings_row >= 4].tolist()

        if not liked:
            continue

        # Generate recommendations and collect the recommended titles
        recs = recommend_movies(uid, user_movie_matrix, clusters, n_recommendations)
        rec_titles = [title for title, _ in recs]

        # Hits = recommended titles the user actually liked
        hits = set(rec_titles) & set(liked)

        precision_scores.append(len(hits) / n_recommendations if rec_titles else 0)
        recall_scores.append(len(hits) / len(liked) if liked else 0)

    # Aggregate over all evaluated users
    avg_precision = np.mean(precision_scores) if precision_scores else 0
    avg_recall = np.mean(recall_scores) if recall_scores else 0

    # Harmonic mean of precision and recall
    denom = avg_precision + avg_recall
    f1_score = 2 * avg_precision * avg_recall / denom if denom > 0 else 0

    print(f"平均精确率: {avg_precision:.4f}")
    print(f"平均召回率: {avg_recall:.4f}")
    print(f"F1分数: {f1_score:.4f}")
    print(f"推荐系统评估耗时: {time.time() - t_start:.2f} 秒")

    return {
        'precision': avg_precision,
        'recall': avg_recall,
        'f1_score': f1_score
    }


# ----------------------
# Main program
# ----------------------
def main():
    """Run the full pipeline: load -> cluster -> visualize -> recommend."""
    setup_fonts()

    # Output directory for saved artifacts
    os.makedirs('results', exist_ok=True)

    print("电影推荐系统启动中...")

    # 1. Load and clean the ratings data
    df = load_and_preprocess_data('DMSC.csv')  # replace with your own file path

    # 2. Pivot into the user x movie rating matrix
    user_movie_matrix = build_user_movie_matrix(df)

    # 3. Standardize features before clustering
    print("\n正在标准化数据...")
    scaled_data = StandardScaler().fit_transform(user_movie_matrix)

    # 4. Choose the number of clusters
    best_k = find_optimal_clusters(scaled_data, max_k=10)

    # 5. Cluster the users
    clusters, kmeans = perform_kmeans(scaled_data, best_k)

    # 6. Visualize (pass a copy: visualize_clusters adds a 'Cluster' column)
    cluster_avg_ratings = visualize_clusters(scaled_data, clusters, kmeans, user_movie_matrix.copy())

    # 7. Evaluate the recommender
    evaluation_results = evaluate_recommendation_system(user_movie_matrix, clusters)

    # 8. Example recommendations for the first five users
    print("\n生成推荐示例:")
    for uid in user_movie_matrix.index[:5]:
        recs = recommend_movies(uid, user_movie_matrix, clusters, n_recommendations=5)
        if recs:
            print(f"\n为用户 {uid} 推荐的电影:")
            for rank, (title, score) in enumerate(recs, 1):
                print(f"  {rank}. {title} (推荐分数: {score:.2f})")

    # 9. Persist cluster assignments
    user_movie_matrix['Cluster'] = clusters
    user_movie_matrix.to_csv('results/user_clusters.csv')
    print("\n聚类结果已保存至 'results/user_clusters.csv'")
    print("可视化图形已保存至当前目录")
    print("电影推荐系统运行完成！")


if __name__ == "__main__":
    main()