import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_score, calinski_harabasz_score
import os
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.colors import LinearSegmentedColormap


def create_professional_cmap():
    """Build the pastel colormap used across all report figures."""
    pastel_palette = [
        "#FF9AA2",
        "#FFB7B2",
        "#FFDAC1",
        "#E2F0CB",
        "#B5EAD7",
        "#C7CEEA",
    ]
    return LinearSegmentedColormap.from_list("professional", pastel_palette)


def load_and_combine_data():
    """Read every known video CSV, tag rows with their source file, and concatenate.

    Returns:
        pd.DataFrame: all rows from the files that exist on disk, with an
        added 'Source' column holding the file name without its extension.

    Raises:
        FileNotFoundError: if none of the expected files are present.
    """
    file_list = [
        'SVM筛选出的热门视频集.csv',
        'video_list_1.csv',
        'video_list_2.csv',
        'video_list_3.csv',
        '线性回归筛选出的热门视频集.csv',
        '聚类筛选出的热门视频集.csv'
    ]

    frames = []
    for path in file_list:
        if not os.path.exists(path):
            print(f"警告: 文件 {path} 不存在，跳过")
            continue
        frame = pd.read_csv(path)
        # Record provenance so downstream steps can trace each row.
        frame['Source'] = os.path.splitext(path)[0]
        frames.append(frame)

    if not frames:
        raise FileNotFoundError("没有找到任何数据文件")

    return pd.concat(frames, ignore_index=True)


def preprocess_data(data):
    """Standardize features to zero mean / unit variance.

    Args:
        data: numeric feature table (DataFrame or array).

    Returns:
        numpy array of the scaled values.
    """
    return StandardScaler().fit_transform(data)


def apply_pca(data_scaled, n_components=2):
    """Fit a PCA model and project the scaled data onto it.

    Args:
        data_scaled: standardized feature matrix.
        n_components: number of principal components to keep (default 2).

    Returns:
        tuple: (fitted PCA model, projected data array).
    """
    model = PCA(n_components=n_components)
    projected = model.fit_transform(data_scaled)
    return model, projected


def apply_kmeans(data_pca, n_clusters=3):
    """Cluster the projected data with k-means++ seeding.

    Args:
        data_pca: feature matrix (typically PCA-projected).
        n_clusters: number of clusters to fit (default 3).

    Returns:
        tuple: (fitted KMeans model, array of cluster labels).
    """
    model = KMeans(n_clusters=n_clusters, init='k-means++', random_state=42)
    labels = model.fit_predict(data_pca)
    return model, labels


def visualize_results(pca, principal_components, kmeans, cluster_labels, video_data):
    """Render the four-panel analysis report and save it as a PNG.

    Panels: (1) 3D scatter of three raw columns colored by cluster,
    (2) cumulative explained variance of a full PCA refit, (3) 2D PCA
    scatter with feature-loading arrows (biplot), (4) grouped bars of
    per-cluster feature means, plus a metrics text box.

    Args:
        pca: fitted 2-component PCA model (loadings and variance ratios).
        principal_components: (n_samples, 2) PCA-projected data.
        kmeans: fitted KMeans model (accepted for interface symmetry; not
            read directly in this function).
        cluster_labels: per-row cluster ids; this figure assumes exactly
            three clusters (ids 0-2).
        video_data: original combined DataFrame, possibly with a 'Source'
            column that is dropped before numeric work.

    Side effects:
        Writes results/video_analysis_report.png; the 'results' directory
        must already exist (main() creates it before calling).
    """
    cmap = create_professional_cmap()
    # One color per cluster; hard-coded for 3 clusters throughout the figure.
    cluster_colors = [cmap(i / 3) for i in range(3)]

    plt.figure(figsize=(18, 12), facecolor='white')
    plt.suptitle('Bilibili Hot Videos Analysis Report',
                 fontsize=24, fontweight='bold', color='#2c3e50', y=0.98)

    # --- Panel 1: 3D scatter of three raw columns, colored by cluster ---
    # NOTE(review): the columns read here ('Total_Amount', 'Quantity',
    # 'Discount') look like sales-data fields, while the axis labels below
    # say play count / comments / engagement. Presumably the CSVs reuse
    # these column names for the video metrics — confirm against the data,
    # otherwise this raises KeyError or plots the wrong quantities.
    ax1 = plt.subplot(2, 2, 1, projection='3d')
    sc = ax1.scatter(
        video_data['Total_Amount'],
        video_data['Quantity'],
        video_data['Discount'],
        c=cluster_labels,
        cmap=cmap,
        s=50,
        edgecolor='w',
        linewidth=0.4,
        alpha=0.8
    )

    ax1.view_init(elev=25, azim=-45)
    ax1.set_title('3D Cluster Visualization: Play Count, Comments, Engagement',
                  fontsize=16, pad=15, color='#2c3e50')
    ax1.set_xlabel('Total Play Count', fontsize=12, labelpad=10)
    ax1.set_ylabel('Comment Quantity', fontsize=12, labelpad=10)
    ax1.set_zlabel('Engagement Rate', fontsize=12, labelpad=10)

    # Hide the 3D panes for a cleaner, flat look.
    ax1.grid(True, linestyle='--', alpha=0.3)
    ax1.xaxis.pane.fill = False
    ax1.yaxis.pane.fill = False
    ax1.zaxis.pane.fill = False
    ax1.xaxis.pane.set_edgecolor('w')
    ax1.yaxis.pane.set_edgecolor('w')
    ax1.zaxis.pane.set_edgecolor('w')

    # Manual legend: one proxy marker per cluster, labeled with its size.
    legend_elements = [
        plt.Line2D([0], [0], marker='o', color='w', markerfacecolor=cluster_colors[i],
                   markersize=10, label=f'Cluster {i} ({np.sum(cluster_labels == i)})')
        for i in range(3)
    ]
    ax1.legend(handles=legend_elements, loc='upper left', fontsize=10)

    # --- Panel 2: cumulative explained variance from a full PCA refit ---
    # Refits PCA with all components (the passed-in `pca` only kept 2) so
    # the full variance curve can be shown.
    ax2 = plt.subplot(2, 2, 2)
    pca_full = PCA().fit(preprocess_data(video_data.drop('Source', axis=1, errors='ignore')))
    cumulative_variance = np.cumsum(pca_full.explained_variance_ratio_)

    plt.plot(range(1, len(cumulative_variance) + 1), cumulative_variance,
             color='#3498db', linewidth=3, marker='o', markersize=8)

    plt.axhline(y=0.95, color='#e74c3c', linestyle='--', linewidth=2, alpha=0.8)
    plt.text(4.2, 0.96, '95% Threshold', color='#e74c3c', fontsize=12)

    plt.title('Cumulative Explained Variance by Principal Components',
              fontsize=16, pad=15, color='#2c3e50')
    plt.xlabel('Number of Principal Components', fontsize=12)
    plt.ylabel('Cumulative Explained Variance', fontsize=12)
    plt.ylim(0, 1.05)
    plt.grid(True, linestyle='--', alpha=0.3)

    # Annotate each point with its cumulative variance value.
    for i, var in enumerate(cumulative_variance):
        plt.annotate(f'{var:.2f}',
                     (i + 1, var),
                     textcoords="offset points",
                     xytext=(0, 10),
                     ha='center',
                     fontsize=9)

    # --- Panel 3: 2D PCA biplot (cluster scatter + feature loadings) ---
    ax3 = plt.subplot(2, 2, 3)
    for cluster_id in range(3):
        mask = cluster_labels == cluster_id
        ax3.scatter(
            principal_components[mask, 0],
            principal_components[mask, 1],
            color=cluster_colors[cluster_id],
            s=80,
            alpha=0.7,
            edgecolor='w',
            linewidth=0.7,
            label=f'Cluster {cluster_id}'
        )

    features = video_data.drop('Source', axis=1, errors='ignore').columns
    arrow_properties = dict(
        arrowstyle="->",
        linewidth=1.5,
        alpha=0.7,
        color='#2c3e50'
    )

    # Draw each feature's loading vector, scaled by 3 for visibility.
    for i, feature in enumerate(features):
        ax3.annotate(
            "",
            xy=(pca.components_[0, i] * 3, pca.components_[1, i] * 3),
            xytext=(0, 0),
            arrowprops=arrow_properties
        )
        ax3.text(
            pca.components_[0, i] * 3.1,
            pca.components_[1, i] * 3.1,
            feature,
            color='#2c3e50',
            fontsize=12,
            fontweight='bold',
            ha='center'
        )

    ax3.set_title('PCA with K-means Clustering', fontsize=16, pad=15, color='#2c3e50')
    ax3.set_xlabel(f'PC1: {pca.explained_variance_ratio_[0] * 100:.1f}% variance', fontsize=12)
    ax3.set_ylabel(f'PC2: {pca.explained_variance_ratio_[1] * 100:.1f}% variance', fontsize=12)
    ax3.legend(loc='best', fontsize=10)
    ax3.grid(True, linestyle='--', alpha=0.3)

    # --- Panel 4: grouped bar chart of per-cluster feature means ---
    ax4 = plt.subplot(2, 2, 4)
    cluster_means = video_data.drop('Source', axis=1, errors='ignore').copy()
    cluster_means['Cluster'] = cluster_labels
    cluster_means = cluster_means.groupby('Cluster').mean()

    x = np.arange(len(cluster_means.columns))
    width = 0.25  # bar width; three clusters fit in each feature slot

    for i, cluster_id in enumerate(cluster_means.index):
        values = cluster_means.iloc[i].values
        rects = ax4.bar(
            x + i * width,
            values,
            width,
            color=cluster_colors[i],
            alpha=0.85,
            edgecolor='w',
            linewidth=1,
            label=f'Cluster {cluster_id}'
        )

        # Label each bar with its value.
        for rect in rects:
            height = rect.get_height()
            ax4.annotate(f'{height:.1f}',
                         xy=(rect.get_x() + rect.get_width() / 2, height),
                         xytext=(0, 3),
                         textcoords="offset points",
                         ha='center',
                         va='bottom',
                         fontsize=9)

    ax4.set_title('Cluster Feature Comparison', fontsize=16, pad=15, color='#2c3e50')
    ax4.set_ylabel('Value', fontsize=12)
    # Center tick labels under the middle bar of each 3-bar group.
    ax4.set_xticks(x + width)
    ax4.set_xticklabels(cluster_means.columns, rotation=15)
    ax4.legend(fontsize=10)
    ax4.grid(True, linestyle='--', alpha=0.3, axis='y')

    # Clustering quality metrics, computed on the PCA-projected space.
    silhouette = silhouette_score(principal_components, cluster_labels)
    calinski = calinski_harabasz_score(principal_components, cluster_labels)

    text_content = (
        f"Clustering Performance:\n"
        f"• Silhouette Score: {silhouette:.3f}\n"
        f"• Calinski-Harabasz Index: {calinski:.0f}\n"
        f"• Variance Explained: {(pca.explained_variance_ratio_.sum() * 100):.1f}%\n"
        f"• Cluster Sizes:\n"
        f"  - Cluster 0: {np.sum(cluster_labels == 0)} videos\n"
        f"  - Cluster 1: {np.sum(cluster_labels == 1)} videos\n"
        f"  - Cluster 2: {np.sum(cluster_labels == 2)} videos"
    )

    plt.figtext(
        0.75, 0.25,
        text_content,
        bbox=dict(facecolor='#f8f9f9', alpha=0.7, edgecolor='#d5dbdb', boxstyle='round,pad=1'),
        fontsize=12,
        color='#2c3e50',
        verticalalignment='top'
    )

    plt.figtext(
        0.95, 0.02,
        "Bilibili Analytics | Video Analysis",
        ha="right",
        fontsize=10,
        color='#7f8c8d',
        alpha=0.7
    )

    # Uniform light-grey borders on all panels.
    for ax in [ax1, ax2, ax3, ax4]:
        for spine in ax.spines.values():
            spine.set_edgecolor('#d5dbdb')
            spine.set_linewidth(1.5)

    # Leave headroom for the suptitle and footer text.
    plt.tight_layout(rect=[0, 0.03, 1, 0.95])
    plt.savefig('results/video_analysis_report.png', dpi=300, bbox_inches='tight')
    plt.close()


def elbow_method(data_scaled, max_clusters=10):
    """Run the elbow analysis for k-means and save an annotated plot.

    Fits KMeans for k = 1..max_clusters, records the within-cluster sum of
    squares (inertia), estimates the elbow as the point of maximum curvature
    of the inertia curve, and writes the plot to
    results/video_elbow_method.png.

    Args:
        data_scaled: standardized feature matrix to cluster.
        max_clusters: largest k to evaluate (default 10).

    Returns:
        list[float]: inertia values for k = 1..max_clusters.
    """
    distortions = []
    for i in range(1, max_clusters + 1):
        kmeans = KMeans(n_clusters=i, random_state=42)
        kmeans.fit(data_scaled)
        distortions.append(kmeans.inertia_)

    plt.figure(figsize=(10, 6), facecolor='white')
    plt.plot(range(1, max_clusters + 1), distortions, color='#3498db', marker='o', linewidth=2, markersize=8)
    plt.title('Elbow Method for Optimal Cluster Identification',
              fontsize=16, pad=15, color='#2c3e50')
    plt.xlabel('Number of Clusters', fontsize=12)
    plt.ylabel('Within-Cluster Sum of Squares', fontsize=12)
    plt.grid(True, linestyle='--', alpha=0.3)

    # BUG FIX: the inertia curve is decreasing and convex, so its second
    # differences are positive and the elbow is where the curvature is
    # LARGEST. The original used np.argmin, which selects the flattest part
    # of the curve and mislabels the optimal k. The +2 offset maps the
    # second-difference index back to a cluster count k.
    elbow_point = np.argmax(np.diff(distortions, 2)) + 2
    plt.scatter(elbow_point, distortions[elbow_point - 1], s=150,
                facecolors='none', edgecolors='#e74c3c', linewidths=2)
    plt.annotate(f'Optimal Clusters: {elbow_point}',
                 (elbow_point, distortions[elbow_point - 1]),
                 textcoords="offset points",
                 xytext=(20, -30),
                 ha='center',
                 fontsize=12,
                 arrowprops=dict(arrowstyle="->", connectionstyle="arc3,rad=-0.2"))

    # Robustness: ensure the output directory exists even when this function
    # is called standalone (main() also creates it, but don't rely on that).
    os.makedirs('results', exist_ok=True)
    plt.tight_layout()
    plt.savefig('results/video_elbow_method.png', dpi=300)
    plt.close()
    return distortions


def main():
    """End-to-end pipeline: load CSVs, standardize, elbow analysis, PCA,
    k-means, visualization, and CSV exports under the 'results' directory.

    Raises:
        FileNotFoundError: if no input CSV files are found (from
        load_and_combine_data).
    """
    print("Loading video data from CSV files...")
    video_data = load_and_combine_data()
    print(f"Loaded {len(video_data)} video records")
    if 'Source' in video_data.columns:
        print("Data sources:", video_data['Source'].unique())

    os.makedirs('results', exist_ok=True)
    video_data.to_csv('results/combined_videos.csv', index=False)

    print("\nStandardizing data...")
    # Drop the non-numeric provenance column before scaling.
    scaled_data = preprocess_data(video_data.drop('Source', axis=1, errors='ignore'))

    print("\nRunning elbow method analysis...")
    elbow_method(scaled_data)

    print("Applying PCA dimensionality reduction...")
    pca, principal_components = apply_pca(scaled_data)
    print(f"PC1 Explained Variance: {pca.explained_variance_ratio_[0] * 100:.1f}%")
    print(f"PC2 Explained Variance: {pca.explained_variance_ratio_[1] * 100:.1f}%")

    print("\nApplying K-means clustering...")
    kmeans, cluster_labels = apply_kmeans(principal_components)

    # Quality metrics computed in the PCA-projected space.
    silhouette = silhouette_score(principal_components, cluster_labels)
    calinski = calinski_harabasz_score(principal_components, cluster_labels)
    print(f"Clustering Silhouette Score: {silhouette:.2f}")
    print(f"Calinski-Harabasz Index: {calinski:.0f}")

    print("\nGenerating visualizations...")
    visualize_results(pca, principal_components, kmeans, cluster_labels, video_data)

    clustered_data = video_data.copy()
    clustered_data['Cluster'] = cluster_labels
    clustered_data['PC1'] = principal_components[:, 0]
    clustered_data['PC2'] = principal_components[:, 1]
    clustered_data.to_csv('results/clustered_videos.csv', index=False)

    # BUG FIX: aggregate only numeric columns. The original grouped the full
    # frame — including the string 'Source' column — and 'mean'/'std' on an
    # object column raises TypeError in pandas >= 2.0.
    numeric_data = clustered_data.select_dtypes(include=[np.number])
    cluster_report = numeric_data.groupby('Cluster').agg(['mean', 'std', 'min', 'max'])
    cluster_report.to_csv('results/video_analysis_summary.csv')

    print("\nAnalysis complete! Results saved to 'results' directory")
    print(f"Main report: results/video_analysis_report.png")
    print(f"Data summary: results/video_analysis_summary.csv")


# Script entry point: run the full analysis pipeline when executed directly.
if __name__ == "__main__":
    main()