"""
基于聚类的商品新品销量预测工具
运行命令：python new_product_clustering.py
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA

# ======================
# Configuration
# ======================
DATA_PATH = "lilylikes-cart-20250403.csv"  # replace with the actual data path
DAYS_TO_PREDICT = 7  # number of days the final forecast covers
RANDOM_STATE = 42  # random seed so K-Means runs are reproducible


# ======================
# 数据预处理
# ======================
def load_and_preprocess(file_path):
    """Load the raw cart CSV and build the per-record modelling features.

    Parameters
    ----------
    file_path : str
        Path to the CSV export. Expected columns: goods_id,
        first_new_date, static_date_id, color, size, taobao_class_name,
        new_price, daily_price, cart_cnt, pay_itm_cnt.

    Returns
    -------
    pandas.DataFrame
        The input records with one-hot categorical columns, price
        features, and per-item 7-day aggregate columns merged back on.
        Items with no rows in their first 7 days get NaN aggregates
        (filtered out later by prepare_features).
    """
    df = pd.read_csv(file_path)

    # Parse dates so we can do date arithmetic below.
    df['first_new_date'] = pd.to_datetime(df['first_new_date'])
    df['static_date_id'] = pd.to_datetime(df['static_date_id'])

    # Days elapsed since the item first went on sale.
    df['days_since_new'] = (df['static_date_id'] - df['first_new_date']).dt.days

    # One-hot encode the categorical attributes.
    df = pd.get_dummies(df, columns=['color', 'size', 'taobao_class_name'])

    # Price features; the epsilon guards against division by zero.
    df['price_diff'] = df['new_price'] - df['daily_price']
    df['price_ratio'] = df['new_price'] / (df['daily_price'] + 1e-6)

    # Aggregate the first 7 days after launch, per item.
    # BUGFIX: the named aggregation already encodes the '_7d_avg' suffix;
    # the previous extra .add_suffix('_7d_avg') doubled it
    # (cart_cnt_7d_avg_7d_avg), breaking the downstream feature lookup.
    # Also restrict to days 0-7 as the column names and the section
    # comment intended.
    first_week = df[df['days_since_new'].between(0, 7)]
    agg_features = first_week.groupby('goods_id').agg(
        cart_cnt_7d_avg=('cart_cnt', 'mean'),
        pay_itm_cnt_7d_avg=('pay_itm_cnt', 'mean'),
        price_diff_7d_avg=('price_diff', 'mean'),
    ).reset_index()

    # Left merge keeps every record even when an item has no first-week
    # rows (its aggregates stay NaN instead of the row being dropped).
    df = df.merge(agg_features, on='goods_id', how='left')
    return df


# ======================
# 特征工程
# ======================
def prepare_features(df):
    """Pick the modelling columns and drop rows with missing values.

    Parameters
    ----------
    df : pandas.DataFrame
        Output of load_and_preprocess.

    Returns
    -------
    (pandas.DataFrame, list of str)
        The NaN-free feature frame and the ordered feature-name list.
    """
    base_features = [
        'cart_cnt_7d_avg',
        'pay_itm_cnt_7d_avg',
        'price_diff',
        'price_ratio',
        'days_since_new',
    ]
    # Append every one-hot column produced by get_dummies, preserving
    # the DataFrame's column order.
    dummy_prefixes = ('color_', 'size_', 'taobao_class_name_')
    feature_names = base_features + [
        col for col in df.columns if col.startswith(dummy_prefixes)
    ]

    # Rows with any missing feature cannot be clustered.
    clean = df[feature_names].dropna()
    return clean, feature_names


# ======================
# 聚类分析
# ======================
def perform_clustering(X, features):
    """Standardise X, let the operator pick k via the elbow plot, fit K-Means.

    Parameters
    ----------
    X : ndarray of shape (n_samples, n_features)
        Raw (unscaled) feature matrix.
    features : list of str
        Names for the columns of X, used when printing the cluster profile.

    Returns
    -------
    (StandardScaler, KMeans)
        The fitted scaler and the fitted clustering model (labels_ set).
    """
    # Standardise so every feature contributes comparably to the distance.
    scaler = StandardScaler()
    X_scaled = scaler.fit_transform(X)

    # Elbow method: record SSE (inertia) over a range of candidate k.
    sse = []
    for k in range(2, 10):
        kmeans = KMeans(n_clusters=k, random_state=RANDOM_STATE)
        kmeans.fit(X_scaled)
        sse.append(kmeans.inertia_)

    plt.figure(figsize=(10, 6))
    plt.plot(range(2, 10), sse, 'bx-')
    plt.xlabel('Number of clusters (k)')
    plt.ylabel('SSE')
    plt.title('Elbow Method for Optimal k')
    plt.show()

    # The operator reads the elbow off the plot and enters k manually.
    best_k = int(input("根据肘部图输入最佳聚类数k: "))

    # Final fit; labels_ are consumed by the caller.
    kmeans = KMeans(n_clusters=best_k, random_state=RANDOM_STATE)
    kmeans.fit(X_scaled)

    # Cluster-centre profile (in scaled space): one row per feature, one
    # column per cluster. BUGFIX: the original iterated over features and
    # took cluster_centers_[:, idx] per column, which mislabelled the table
    # and raised a length-mismatch ValueError whenever best_k differed
    # from len(features).
    cluster_profile = pd.DataFrame({
        'feature': features,
        **{f'cluster_{i}': kmeans.cluster_centers_[i]
           for i in range(best_k)}
    })
    print("\n聚类中心特征：")
    print(cluster_profile)

    return scaler, kmeans


# ======================
# 可视化
# ======================
def visualize_clusters(X_scaled, labels):
    """Project the scaled features to 2-D with PCA and scatter-plot the
    points coloured by cluster label (blocks until the window closes)."""
    projection = PCA(n_components=2).fit_transform(X_scaled)

    plt.figure(figsize=(10, 6))
    plt.scatter(
        projection[:, 0],
        projection[:, 1],
        c=labels,
        cmap='viridis',
        alpha=0.6,
    )
    plt.title('PCA Visualization of Clusters')
    plt.xlabel('Principal Component 1')
    plt.ylabel('Principal Component 2')
    plt.colorbar(label='Cluster')
    plt.show()


# ======================
# 预测函数
# ======================
class SalesPredictor:
    """Assigns a new item to one of the fitted K-Means clusters."""

    def __init__(self, scaler, model, features):
        self.scaler = scaler            # fitted StandardScaler
        self.model = model              # fitted KMeans
        self.feature_order = features   # column order the scaler was fit on

    def predict(self, new_item):
        """Return the cluster id for a dict of raw feature values.

        Building a one-row DataFrame with an explicit column list keeps
        the feature order identical to training, regardless of dict order.
        """
        frame = pd.DataFrame([new_item], columns=self.feature_order)
        scaled = self.scaler.transform(frame)
        return self.model.predict(scaled)[0]


# ======================
# 主程序
# ======================
if __name__ == "__main__":
    # Pipeline: load -> features -> cluster -> visualise -> predict.
    print("正在加载数据...")
    df = load_and_preprocess(DATA_PATH)

    print("正在准备特征...")
    X, features = prepare_features(df)

    print("正在进行聚类分析...")
    scaler, kmeans = perform_clustering(X.values, features)

    # BUGFIX: the median lookup below filters on df['cluster'], but that
    # column was never created (KeyError at runtime). The fitted labels
    # align row-for-row with X (the dropna-filtered feature frame), so
    # write them back through X.index; rows excluded from clustering keep
    # NaN and never match the equality filter.
    df.loc[X.index, 'cluster'] = kmeans.labels_

    print("生成可视化...")
    visualize_clusters(scaler.transform(X.values), kmeans.labels_)

    predictor = SalesPredictor(scaler, kmeans, features)

    # Example prediction for a hypothetical new item.
    print("\n示例预测：")
    sample_item = {
        'cart_cnt_7d_avg': 15,
        'pay_itm_cnt_7d_avg': 8,
        'price_diff': -20,
        'price_ratio': 0.8,
        'days_since_new': 0
    }
    # Default every one-hot categorical feature to 0 ...
    for col in features:
        if col.startswith(('color_', 'size_', 'taobao_class_name_')):
            sample_item[col] = 0
    # ... then switch on the attribute(s) describing the item (example).
    sample_item['color_red'] = 1

    cluster = predictor.predict(sample_item)
    # Forecast: the median 7-day average sales of the item's cluster is
    # taken as the per-day estimate.
    cluster_sales = df[df['cluster'] == cluster]['pay_itm_cnt_7d_avg'].median()

    print(f"预测结果：")
    print(f"- 所属聚类: {cluster}")
    print(f"- 单日预测销量: {cluster_sales:.1f}")
    print(f"- {DAYS_TO_PREDICT}天总预测: {cluster_sales * DAYS_TO_PREDICT:.1f}")

# Print follow-up guidance for the operator. NOTE(review): this runs even
# when the module is imported (it is outside the __main__ guard) — confirm
# that is intentional.
print("""
操作建议：
1. 根据肘部法则图选择最佳k值
2. 检查PCA可视化中的聚类分离情况
3. 根据实际业务调整sample_item的特征值
""")