import pandas as pd
import numpy as np
import os
import joblib
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split

from keras.src.models import Sequential
from keras.callbacks import EarlyStopping
from keras.layers import LSTM,Dense


# ================== 数据预处理函数 ==================
def create_lstm_dataset(data, window_size=7):
    """Build sliding-window samples for LSTM training.

    Each sample is a window of `window_size` consecutive rows; its target is
    the first feature (sale_qty) of the row immediately after the window.

    :param data: scaled 2-D array of shape (n_steps, n_features)
    :param window_size: number of time steps per input window
    :return: tuple (X, y) with X of shape (n_samples, window_size, n_features)
        and y of shape (n_samples,)
    """
    n_samples = len(data) - window_size
    windows = [data[start:start + window_size] for start in range(n_samples)]
    targets = [data[start + window_size, 0] for start in range(n_samples)]
    return np.array(windows), np.array(targets)


# ================== 模型构建 ==================
def build_lstm_model(input_shape):
    """Assemble and compile a stacked two-layer LSTM regressor.

    :param input_shape: (window_size, n_features) shape of one input sample
    :return: compiled keras Sequential model with a single linear output
    """
    model = Sequential([
        LSTM(64, input_shape=input_shape, return_sequences=True),
        LSTM(32),
        Dense(1),  # single-value regression head
    ])
    model.compile(optimizer='adam', loss='mse')
    return model


# ================== 模型训练 ==================
def train_lstm_models(data_file, window_size=7, epochs=50, batch_size=32):
    """
    Train one LSTM model per spu_code and persist models and scalers to disk.

    :param data_file: Excel file with at least 'spu_code' and 'sale_qty' columns
    :param window_size: number of past time steps per training sample
    :param epochs: maximum training epochs (early stopping may end sooner)
    :param batch_size: mini-batch size
    :return: dict mapping spu_code -> trained keras model
    """
    df = pd.read_excel(data_file)
    models = {}
    scalers = {}

    for spu, group in df.groupby('spu_code'):
        print(f"开始训练款号 {spu} 的LSTM模型...")

        # Target series: sales quantity as a (n, 1) float array.
        data = group[['sale_qty']].values.astype(float)

        # Need at least window_size + 1 rows to form a single (X, y) sample.
        if len(data) <= window_size:
            print(f"⚠️ 款号 {spu} 数据不足，跳过")
            continue

        # Scale to [0, 1]; keep the fitted scaler to invert predictions later.
        scaler = MinMaxScaler(feature_range=(0, 1))
        scaled_data = scaler.fit_transform(data)
        scalers[spu] = scaler

        X, y = create_lstm_dataset(scaled_data, window_size)

        # Chronological split (shuffle=False) so no future data leaks into training.
        X_train, X_val, y_train, y_val = train_test_split(
            X, y, test_size=0.2, shuffle=False
        )

        model = build_lstm_model((X_train.shape[1], X_train.shape[2]))

        # Stop when validation loss plateaus; keep the best-epoch weights.
        early_stop = EarlyStopping(monitor='val_loss', patience=10, restore_best_weights=True)

        history = model.fit(
            X_train, y_train,
            validation_data=(X_val, y_val),
            epochs=epochs,
            batch_size=batch_size,
            callbacks=[early_stop],
            verbose=0
        )

        models[spu] = model
        # BUG FIX: early_stop.stopped_epoch is 0 when training runs the full
        # epoch budget, and otherwise is the epoch training *halted* at — not
        # the best one. Report the 1-based epoch with the lowest validation
        # loss, which matches the weights kept by restore_best_weights.
        best_epoch = int(np.argmin(history.history['val_loss'])) + 1
        print(f"✅ 款号 {spu} LSTM模型训练完成，最佳 epoch: {best_epoch}")

    # Persist both dicts; prediction reloads them by file name.
    joblib.dump(models, 'lstm_models.pkl')
    joblib.dump(scalers, 'scalers.pkl')
    print("✅ 所有LSTM模型及缩放器已保存")
    return models


# ================== 递归预测 ==================
def recursive_lstm_predict(model, initial_sequence, scaler, forecast_days=14, window_size=7):
    """
    Autoregressively forecast `forecast_days` steps with a trained LSTM.

    Each step feeds the model the current window of scaled observations,
    takes the one-step prediction, and slides it into the window for the
    next step.

    :param model: trained keras model expecting input of shape
        (1, window_len, n_features)
    :param initial_sequence: scaled array of shape (window_len, n_features)
    :param scaler: fitted MinMaxScaler used to invert the scaling
    :param forecast_days: number of future steps to predict
    :param window_size: unused here (the window length is taken from
        initial_sequence); kept for interface compatibility
    :return: list of float predictions in the original (unscaled) units
    """
    predictions = []
    current_seq = initial_sequence.copy()

    for _ in range(forecast_days):
        # BUG FIX: model.predict returns shape (1, 1); the original kept a
        # length-1 array via [0], which makes inverse_transform([[pred]])
        # receive a 3-D input (rejected by sklearn) and makes the scalar
        # assignment below rely on deprecated NumPy array->scalar coercion.
        # Extract a plain float instead.
        pred = float(model.predict(current_seq[np.newaxis, :, :], verbose=0)[0, 0])
        pred_unscaled = float(scaler.inverse_transform(np.array([[pred]]))[0, 0])
        predictions.append(pred_unscaled)

        # Slide the window: drop the oldest step, append the new prediction.
        current_seq = np.roll(current_seq, -1, axis=0)
        current_seq[-1, 0] = pred

    return predictions


# ================== 主预测函数 ==================
def predict_with_lstm(models_file, scalers_file, data_file, forecast_days=14, window_size=7):
    """
    Load the persisted per-spu models and scalers and forecast future sales.

    :param models_file: joblib file containing {spu_code: keras model}
    :param scalers_file: joblib file containing {spu_code: MinMaxScaler}
    :param data_file: Excel file with 'spu_code', 'sale_qty' and
        'days_since_launch' columns providing the most recent history
    :param forecast_days: number of days to forecast per spu
    :param window_size: length of the model's input window
    :return: DataFrame with columns spu_code / days_since_launch /
        predicted_sale_qty; empty DataFrame when nothing could be predicted
    """
    if not os.path.exists(models_file) or not os.path.exists(scalers_file):
        print("❌ 模型或缩放器文件不存在")
        return pd.DataFrame()

    try:
        models = joblib.load(models_file)
        scalers = joblib.load(scalers_file)
        df = pd.read_excel(data_file)
    except Exception as e:
        print(f"❌ 加载文件时出错: {e}")
        return pd.DataFrame()

    all_predictions = []

    for spu, model in models.items():
        spu_data = df[df['spu_code'] == spu]

        if spu_data.empty:
            print(f"⚠️ 未找到款号 {spu} 的数据")
            continue

        # BUG FIX: scalers[spu] raised KeyError and aborted ALL remaining
        # spus when the scaler dict is out of sync with the model dict;
        # skip just this spu instead.
        scaler = scalers.get(spu)
        if scaler is None:
            print(f"⚠️ 未找到款号 {spu} 的缩放器，跳过")
            continue

        # Scale the latest history with the scaler fitted at training time.
        latest_data = spu_data[['sale_qty']].values.astype(float)
        scaled_latest = scaler.transform(latest_data)

        # BUG FIX: with fewer than window_size rows the initial sequence is
        # too short and model.predict fails on an input-shape mismatch.
        if len(scaled_latest) < window_size:
            print(f"⚠️ 款号 {spu} 历史数据不足，跳过")
            continue

        # Seed the recursive forecast with the most recent window.
        initial_seq = scaled_latest[-window_size:]

        preds = recursive_lstm_predict(model, initial_seq, scaler, forecast_days, window_size)

        # Number forecast days consecutively after the last observed day.
        days_since_launch = spu_data.iloc[-1]['days_since_launch']
        for day, pred in enumerate(preds, start=1):
            all_predictions.append({
                'spu_code': spu,
                'days_since_launch': days_since_launch + day,
                'predicted_sale_qty': float(pred)
            })

    result_df = pd.DataFrame(all_predictions)

    if not result_df.empty:
        print(f"\n✅ 成功生成 {len(result_df)} 条预测记录")
        print(f"📊 预测款号数量: {result_df['spu_code'].nunique()}")
    else:
        print("\n⚠️ 未生成任何预测记录")

    return result_df


# ================== 主程序入口 ==================
if __name__ == "__main__":
    # Fit one LSTM per spu_code and persist the models + scalers to disk.
    train_lstm_models("enhanced_sales_data.xlsx")

    # Forecast future sales from the just-saved artifacts.
    future_sales = predict_with_lstm(
        models_file='lstm_models.pkl',
        scalers_file='scalers.pkl',
        data_file='enhanced_sales_data.xlsx',
    )

    if future_sales.empty:
        print("\n⚠️ 未保存预测结果")
    else:
        # Write all forecasts to a single Excel workbook.
        output_file = "lstm_future_sales_predictions.xlsx"
        future_sales.to_excel(output_file, index=False)
        print(f"\n✅ 预测结果已保存为 {output_file}")

        # Summary: number of forecast rows per spu.
        print("\n预测结果摘要:")
        print(future_sales.groupby('spu_code').size().reset_index(name='预测天数'))

        # Sanity check: flag spus whose forecast collapsed to one constant value.
        distinct_counts = future_sales.groupby('spu_code')['predicted_sale_qty'].nunique()
        print("\n预测值变化检查:")
        for code, n_distinct in distinct_counts.items():
            status = "✅ 有变化" if n_distinct > 1 else "⚠️ 全部相同"
            print(f"款号 {code}: {status} ({n_distinct}个不同值)")

        # Preview: first 14 forecast rows for up to two spus.
        print("\n预测结果示例:")
        for code in future_sales['spu_code'].unique()[:2]:
            subset = future_sales.loc[future_sales['spu_code'] == code]
            print(f"\n款号 {code} 预测结果:")
            print(subset[['days_since_launch', 'predicted_sale_qty']].head(14))
