import paddle
import paddle.nn as nn
import paddle.fft as fft
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.preprocessing import StandardScaler
import os

# Use the non-interactive Agg backend so figures can be saved headlessly (no GUI/display needed)
plt.switch_backend('Agg')


class AutoCorrelationMechanism(nn.Layer):
    """Auto-correlation mechanism replacing attention (O(L log L) complexity).

    Computes period-based dependencies via FFT (Wiener-Khinchin theorem),
    selects the top-k lags with the strongest average correlation, and
    aggregates time-delayed copies of the values weighted by the per-sample
    correlation at each selected lag.
    """

    def __init__(self, d_model, top_k=3):
        super().__init__()
        self.top_k = top_k
        self.q_proj = nn.Linear(d_model, d_model)
        self.k_proj = nn.Linear(d_model, d_model)
        self.v_proj = nn.Linear(d_model, d_model)
        self.out_proj = nn.Linear(d_model, d_model)

    def forward(self, x):
        # x: assumed [batch, seq_len, d_model] -- TODO confirm with callers
        seq_len = x.shape[1]

        # 1. Linear projections
        q, k, v = self.q_proj(x), self.k_proj(x), self.v_proj(x)

        # 2. Auto-correlation via FFT.
        # BUG FIX: pass n=seq_len to irfft. Without it the inverse transform
        # yields 2*(L//2) points, which is L-1 for odd seq_len and breaks the
        # element-wise ops against length-L tensors below. For even L this is
        # a no-op, so the change is backward compatible.
        q_fft = fft.rfft(q, axis=1)
        k_fft = fft.rfft(k, axis=1)
        corr = fft.irfft(q_fft * paddle.conj(k_fft), n=seq_len, axis=1)

        # 3. Pick top-k lags by correlation averaged over batch and channels
        _, top_lags = paddle.topk(corr.mean(axis=(0, 2)), k=self.top_k)

        # 4. Time-delay aggregation: roll the values by each lag and weight
        # them by the per-sample correlation at that lag.
        aggregated = []
        for lag in top_lags:
            shifted = paddle.roll(v, shifts=int(lag), axis=1)

            # weight: [batch, 1, d_model], broadcasts over the time axis
            weight = corr[:, int(lag)].unsqueeze(1)
            aggregated.append(shifted * weight)

        # 5. Uniform fusion of the k aggregations, then output projection.
        # NOTE(review): weights are not softmax-normalized as in the original
        # Autoformer; kept as-is to preserve the author's formulation.
        out = sum(aggregated) / len(aggregated)
        return self.out_proj(out)


class SeriesDecomposition(nn.Layer):
    """Moving-average series decomposition (trend + seasonal).

    A sliding average of width ``kernel_size`` extracts the trend component;
    the residual is the seasonal component.
    """

    def __init__(self, kernel_size=25):
        super().__init__()
        self.kernel_size = kernel_size
        # BUG FIX: the original used AvgPool1D zero padding with
        # exclusive=False, so boundary windows averaged in zeros and dragged
        # the trend toward zero at both ends of the sequence. Padding is now
        # done manually in forward() with edge replication (as in
        # Autoformer's moving_avg), so the pooling layer itself pads nothing.
        self.avg_pool = nn.AvgPool1D(
            kernel_size=kernel_size,
            stride=1,
            padding=0,
            exclusive=False
        )

    def forward(self, x):
        # Input shape: [batch, seq_len, channels]
        pad_left = (self.kernel_size - 1) // 2
        pad_right = self.kernel_size - 1 - pad_left

        # Replicate the edge values so boundary trend estimates are unbiased;
        # padded length L + kernel_size - 1 pools back down to exactly L
        # (this also keeps shapes consistent for even kernel sizes).
        front = x[:, :1, :].tile([1, pad_left, 1])
        back = x[:, -1:, :].tile([1, pad_right, 1])
        x_padded = paddle.concat([front, x, back], axis=1)

        x_t = x_padded.transpose([0, 2, 1])  # [batch, channels, padded_len]
        trend = self.avg_pool(x_t).transpose([0, 2, 1])  # [batch, seq_len, channels]
        seasonal = x - trend
        return trend, seasonal


class TimeLLMBlock(nn.Layer):
    """Core block fusing auto-correlation with series decomposition."""

    def __init__(self, d_model, top_k=3, kernel_size=25):
        super().__init__()
        self.autocorr = AutoCorrelationMechanism(d_model, top_k)
        self.decomp1 = SeriesDecomposition(kernel_size)
        self.decomp2 = SeriesDecomposition(kernel_size)

        # Lightweight position-wise feed-forward network
        self.ffn = nn.Sequential(
            nn.Linear(d_model, d_model),
            nn.GELU(),
            nn.Linear(d_model, d_model),
        )

    def forward(self, x):
        # Auto-correlation with a residual connection
        x = x + self.autocorr(x)

        # First decomposition after the correlation step
        trend_a, seasonal_a = self.decomp1(x)

        # Refine the seasonal part with the FFN (residual)
        seasonal_b = seasonal_a + self.ffn(seasonal_a)

        # Second decomposition; both trend estimates are accumulated
        trend_b, seasonal_out = self.decomp2(seasonal_b)
        return trend_a + trend_b, seasonal_out


class EfficientTimeLLM(nn.Layer):
    """Efficient time-series forecasting model.

    Embeds values, calendar features (hour / day-of-week) and positions,
    runs a stack of TimeLLMBlock encoders, averages their seasonal outputs,
    refines them with a small decoder stack, and projects the final time
    step to ``pred_len`` forecast values.
    """

    def __init__(self, seq_len=96, pred_len=24, d_model=128, num_layers=3, top_k=3):
        super().__init__()
        self.seq_len = seq_len
        self.pred_len = pred_len

        # 1. Lightweight value embedding: scalar -> d_model
        self.value_embed = nn.Linear(1, d_model)

        # 2. Calendar feature embeddings
        self.hour_embed = nn.Embedding(24, d_model)
        self.dow_embed = nn.Embedding(7, d_model)

        # Learned absolute position embedding
        self.position_embed = nn.Embedding(seq_len, d_model)

        # 3. Encoder stack
        self.encoder = nn.LayerList(
            [TimeLLMBlock(d_model, top_k) for _ in range(num_layers)])

        # 4. Decoder stack (fixed depth of 2)
        self.decoder = nn.LayerList(
            [TimeLLMBlock(d_model, top_k) for _ in range(2)])

        # 5. Prediction head
        self.head = nn.Sequential(
            nn.Linear(d_model, d_model * 2),
            nn.GELU(),
            nn.Linear(d_model * 2, pred_len),
        )

    def forward(self, X, time_features):
        # Accept raw arrays as well as tensors
        if not isinstance(X, paddle.Tensor):
            X = paddle.to_tensor(X, dtype='float32')

        batch_size = X.shape[0]

        # Scalar values -> embeddings: [batch, seq_len, d_model]
        value_emb = self.value_embed(X.unsqueeze(-1))

        # Calendar embeddings (column 0: hour of day, column 1: day of week)
        hours = time_features[:, :, 0].astype('int64')
        days = time_features[:, :, 1].astype('int64')
        time_emb = self.hour_embed(hours) + self.dow_embed(days)

        # Position embeddings, broadcast across the batch
        idx = paddle.arange(self.seq_len).unsqueeze(0).tile([batch_size, 1])
        pos_emb = self.position_embed(idx)

        # Combined input representation; every encoder layer reads this
        # same representation (layers are not chained).
        hidden = value_emb + time_emb + pos_emb

        # Encoder: collect seasonal outputs and accumulate trends.
        # NOTE(review): the accumulated trend is never consumed by the
        # prediction head -- confirm whether it should be added back.
        seasonal_parts = []
        trend_accum = None
        for block in self.encoder:
            trend, seasonal = block(hidden)
            seasonal_parts.append(seasonal)
            trend_accum = trend if trend_accum is None else trend_accum + trend

        # Average seasonal features across encoder layers
        fused = sum(seasonal_parts) / len(seasonal_parts)

        # Decoder refinement (seasonal branch only)
        for block in self.decoder:
            _, fused = block(fused)

        # Project the representation of the last time step
        return self.head(fused[:, -1])


class TimeSeriesData:
    """Time-series data loading and preprocessing utility."""

    def __init__(self, data_path, target_metric="usage_active", seq_len=96, pred_len=24):
        """
        Args:
            data_path: path to a CSV with columns ``metric_name``,
                ``timestamp_value`` and ``value``.
            target_metric: metric_name value to extract.
            seq_len: history window length (minutes).
            pred_len: forecast horizon (minutes).
        """
        self.target_metric = target_metric
        self.seq_len = seq_len
        self.pred_len = pred_len
        self.scaler = StandardScaler()
        self.load_data(data_path)

    def load_data(self, data_path):
        """Load the CSV, resample to 1-minute frequency and standardize."""
        df = pd.read_csv(data_path)

        # Keep only rows for the target metric
        target_df = df[df['metric_name'] == self.target_metric].copy()

        # Parse timestamps and index by time
        target_df['timestamp'] = pd.to_datetime(target_df['timestamp_value'])
        target_df.set_index('timestamp', inplace=True)

        # Resample to minute granularity, forward-filling gaps
        self.ts = target_df['value'].resample('1min').mean().ffill()

        # Calendar features: hour of day and day of week -> [T, 2]
        self.time_features = np.stack([
            self.ts.index.hour.values,
            self.ts.index.dayofweek.values
        ], axis=-1)

        # Standardize values (zero mean, unit variance)
        self.ts_values = self.scaler.fit_transform(self.ts.values.reshape(-1, 1)).flatten()

    def create_dataset(self):
        """Build supervised sliding windows.

        Returns:
            X: [N, seq_len] history values.
            time_X: [N, seq_len, 2] calendar features aligned with X.
            y: [N, pred_len] targets immediately following each window.
        """
        X, y, time_X = [], [], []

        # BUG FIX: the original iterated range(T - seq_len - pred_len),
        # which silently drops the last valid window; "+ 1" includes it.
        n_samples = len(self.ts_values) - self.seq_len - self.pred_len + 1
        for i in range(n_samples):
            # History window
            X.append(self.ts_values[i:i + self.seq_len])

            # Aligned calendar features
            time_X.append(self.time_features[i:i + self.seq_len])

            # Forecast targets
            y.append(self.ts_values[i + self.seq_len:i + self.seq_len + self.pred_len])

        return np.array(X), np.array(time_X), np.array(y)


def train_model(model, X, time_X, y, epochs=100, batch_size=32):
    """Train the model with AdamW + ReduceOnPlateau, checkpointing the best epoch.

    Args:
        model: EfficientTimeLLM (or compatible) network.
        X: [N, seq_len] float history windows.
        time_X: [N, seq_len, 2] integer calendar features.
        y: [N, pred_len] float targets.
        epochs: number of passes over the training data.
        batch_size: mini-batch size.

    Returns:
        List of per-epoch average training losses.

    Side effects: saves 'best_model.pdparams' and 'training_loss.png'.
    """
    # Convert inputs to Paddle tensors once, up front
    X_tensor = paddle.to_tensor(X, dtype='float32')
    time_tensor = paddle.to_tensor(time_X, dtype='int64')
    y_tensor = paddle.to_tensor(y, dtype='float32')

    # BUG FIX: the scheduler must be handed to the optimizer as its
    # learning_rate. The original created the optimizer with a plain float
    # and built the scheduler from a float copy of that LR, so
    # scheduler.step() never actually changed the optimizer's learning rate.
    scheduler = paddle.optimizer.lr.ReduceOnPlateau(
        learning_rate=1e-3,
        mode='min',
        factor=0.5,
        patience=5
    )
    optimizer = paddle.optimizer.AdamW(
        parameters=model.parameters(),
        learning_rate=scheduler,
        weight_decay=1e-5
    )

    # Training loop
    best_loss = float('inf')
    train_losses = []

    for epoch in range(epochs):
        model.train()
        epoch_loss = 0.0
        num_batches = 0
        permutation = np.random.permutation(len(X))

        for i in range(0, len(X), batch_size):
            # Slice out a shuffled mini-batch
            indices = permutation[i:i + batch_size]
            batch_X = X_tensor[indices]
            batch_time = time_tensor[indices]
            batch_y = y_tensor[indices]

            # Forward pass
            pred = model(batch_X, batch_time)
            loss = nn.functional.mse_loss(pred, batch_y)

            # Backward pass
            optimizer.clear_grad()
            loss.backward()
            optimizer.step()

            # Accumulate as a plain float (the original summed ndarrays)
            epoch_loss += float(loss)
            num_batches += 1

        # BUG FIX: average over the actual number of batches; the original
        # divided by len(X)/batch_size, which is wrong when the last batch
        # is partial.
        avg_loss = epoch_loss / max(num_batches, 1)
        train_losses.append(avg_loss)
        scheduler.step(avg_loss)

        # Checkpoint the best model so far
        if avg_loss < best_loss:
            best_loss = avg_loss
            paddle.save(model.state_dict(), 'best_model.pdparams')
            print(f"Saved best model with loss: {best_loss:.6f}")

        print(f"Epoch {epoch + 1}/{epochs}, Loss: {avg_loss:.6f}")

    # Plot the training curve to a file (Agg backend: no display needed)
    plt.figure(figsize=(10, 5))
    plt.plot(train_losses, label='Training Loss')
    plt.title('Training Loss Over Epochs')
    plt.xlabel('Epochs')
    plt.ylabel('MSE Loss')
    plt.legend()
    plt.savefig('training_loss.png')
    plt.close()

    return train_losses


def evaluate_model(model, X_test, time_X_test, y_test, scaler):
    """Evaluate on a held-out set, plot one sample's forecast, return the MSE.

    Side effect: saves 'prediction_result.png'.
    """
    model.eval()
    with paddle.no_grad():
        inputs = paddle.to_tensor(X_test, dtype='float32')
        time_inputs = paddle.to_tensor(time_X_test, dtype='int64')
        targets = paddle.to_tensor(y_test, dtype='float32')

        predictions = model(inputs, time_inputs)
        mse = nn.functional.mse_loss(predictions, targets).numpy()

        # Undo normalization for the first test sample (the one plotted)
        truth = scaler.inverse_transform(y_test[0].reshape(-1, 1)).flatten()
        estimate = scaler.inverse_transform(
            predictions[0].numpy().reshape(-1, 1)).flatten()

        # Save the true-vs-predicted comparison plot to disk
        plt.figure(figsize=(15, 6))
        plt.plot(truth, label='True')
        plt.plot(estimate, label='Predicted')
        plt.title(f"Time Series Prediction (MSE: {mse:.4f})")
        plt.xlabel('Time Steps')
        plt.ylabel('CPU Usage')
        plt.legend()
        plt.grid(True)
        plt.savefig('prediction_result.png')
        plt.close()

        return mse


# Usage example / end-to-end driver script
if __name__ == "__main__":
    # 1. Data preparation (hard-coded local path; adjust as needed)
    data_path = "E:/kylin_model/data/cpu_test.csv"
    if not os.path.exists(data_path):
        print(f"错误：数据文件不存在 {data_path}")
        exit(1)

    print("加载数据...")
    data_loader = TimeSeriesData(data_path, target_metric="usage_active")
    X, time_X, y = data_loader.create_dataset()

    print(f"数据集大小: X={X.shape}, time_X={time_X.shape}, y={y.shape}")

    # Chronological 80/20 train/test split (no shuffling for time series)
    split_idx = int(len(X) * 0.8)
    X_train, X_test = X[:split_idx], X[split_idx:]
    time_train, time_test = time_X[:split_idx], time_X[split_idx:]
    y_train, y_test = y[:split_idx], y[split_idx:]

    print(f"训练集: {len(X_train)} 样本, 测试集: {len(X_test)} 样本")

    # 2. Initialize the model
    print("初始化模型...")
    model = EfficientTimeLLM(
        seq_len=96,  # use 96 minutes of history
        pred_len=24,  # predict the next 24 minutes
        d_model=128,  # embedding dimension
        num_layers=3,  # number of encoder layers
        top_k=5  # top-k auto-correlation lags
    )

    # 3. Train the model (only 3 epochs here; train_model defaults to 100)
    print("开始训练模型...")
    train_losses = train_model(model, X_train, time_train, y_train, epochs=3)

    # 4. Reload the best checkpoint saved during training
    print("加载最佳模型...")
    model.set_state_dict(paddle.load('best_model.pdparams'))

    # 5. Evaluate on the held-out test set
    print("评估模型...")
    test_loss = evaluate_model(model, X_test, time_test, y_test, data_loader.scaler)
    print(f"测试集MSE: {test_loss:.6f}")

    # 6. Forecast beyond the data using the most recent test window
    print("进行未来预测...")
    latest_data = X_test[-1]  # most recent history window
    latest_time = time_test[-1]
    future_pred = model(
        paddle.to_tensor([latest_data], dtype='float32'),
        paddle.to_tensor([latest_time], dtype='int64')
    )

    # Map predictions back to the original scale
    future_pred = data_loader.scaler.inverse_transform(future_pred.numpy().reshape(-1, 1)).flatten()
    print("未来24分钟预测值:")
    print(future_pred)

    # Save the forecast plot
    plt.figure(figsize=(10, 5))
    plt.plot(future_pred, marker='o')
    plt.title('Future 24-Minute CPU Usage Prediction')
    plt.xlabel('Minutes Ahead')
    plt.ylabel('CPU Usage')
    plt.grid(True)
    plt.savefig('future_prediction.png')
    plt.close()

    print("所有结果已保存为图像文件")