import math
import os
import sys
import time
import numpy as np
import torch
from torch import nn
import torch.nn.functional as F
import matplotlib.pyplot as plt
import pandas as pd
from torch.utils.data import DataLoader, TensorDataset

from sklearn.preprocessing import StandardScaler



class PatchEmbedding(nn.Module):
    """Split each channel of a series into patches and embed them.

    Input  x: [batch, n_vars, seq_len]
    Output  : [batch * n_vars, patch_num, model_dim]
    """

    def __init__(self, model_dim, patch_len, stride, padding, dropout):
        super().__init__()
        self.model_dim = model_dim
        self.patch_len = patch_len
        self.stride = stride
        # Replicate the last time step `padding` times so the tail of the
        # series still yields a full patch.
        # FIX: removed the duplicate `padding_patch_layer` — it was an
        # identical, never-used copy of this layer.
        self.padding = nn.ReplicationPad1d((0, padding))
        self.dropout = nn.Dropout(dropout)
        self.patch2model = nn.Linear(patch_len, model_dim)
        self.position_embedding = PositionalEmbedding(model_dim)

    def forward(self, x):
        x = self.padding(x)
        # Sliding windows over time: [B, n_vars, patch_num, patch_len].
        x = x.unfold(-1, self.patch_len, self.stride)
        # Fold the variable axis into the batch axis (channel independence).
        x = torch.reshape(x, (x.shape[0] * x.shape[1], x.shape[2], x.shape[3]))
        # Linear patch projection plus (fixed) positional encoding.
        x = self.patch2model(x) + self.position_embedding(x)
        return self.dropout(x)


class PositionalEmbedding(nn.Module):
    """Fixed sinusoidal positional encoding (as in "Attention Is All You Need")."""

    def __init__(self, d_model, max_len=5000):
        super(PositionalEmbedding, self).__init__()
        # Compute the positional encodings once in log space.
        # FIX: the original set `pe.require_grad = False`, a typo for
        # `requires_grad` that merely attached a meaningless attribute.
        # It is dropped entirely: `register_buffer` already keeps `pe`
        # out of the trainable parameters.
        pe = torch.zeros(max_len, d_model).float()

        position = torch.arange(0, max_len).float().unsqueeze(1)
        # 1 / 10000^(2i / d_model), evaluated via exp/log for stability.
        div_term = (
            torch.arange(0, d_model, 2).float() * -(math.log(10000.0) / d_model)
        ).exp()

        pe[:, 0::2] = torch.sin(position * div_term)
        pe[:, 1::2] = torch.cos(position * div_term)

        pe = pe.unsqueeze(0)  # [1, max_len, d_model]
        self.register_buffer("pe", pe)

    def forward(self, x):
        # Return the encodings for the first x.size(1) positions: [1, L, d_model].
        return self.pe[:, : x.size(1)]


class FullAttention(nn.Module):
    """Standard scaled dot-product attention over all key positions.

    `factor`, `tau` and `delta` are accepted only for interface
    compatibility with other attention variants and are unused here.
    """

    def __init__(
        self,
        mask_flag=True,
        factor=5,
        scale=None,
        attention_dropout=0.1,
        output_attention=False,
    ):
        super(FullAttention, self).__init__()
        self.scale = scale
        self.mask_flag = mask_flag
        self.output_attention = output_attention
        self.dropout = nn.Dropout(attention_dropout)

    def forward(self, queries, keys, values, attn_mask, tau=None, delta=None):
        batch, q_len, _, head_dim = queries.shape
        # Default to 1/sqrt(d) when no explicit scale was configured.
        scale = self.scale or 1.0 / math.sqrt(head_dim)

        # Raw attention logits: [B, H, Lq, Lk].
        scores = torch.einsum("blhe,bshe->bhls", queries, keys)

        if self.mask_flag:
            if attn_mask is None:
                # Fall back to a causal (lower-triangular) mask.
                attn_mask = TriangularCausalMask(batch, q_len, device=queries.device)
            scores.masked_fill_(attn_mask.mask, -np.inf)

        weights = self.dropout(torch.softmax(scale * scores, dim=-1))
        # Weighted sum of values: [B, Lq, H, Dv].
        context = torch.einsum("bhls,bshd->blhd", weights, values).contiguous()

        if self.output_attention:
            return context, weights
        return context, None


class TriangularCausalMask:
    """Boolean causal mask: True marks (future) positions to be blocked."""

    def __init__(self, B, L, device="cpu"):
        with torch.no_grad():
            # Strictly upper-triangular [B, 1, L, L] boolean mask.
            ones = torch.ones(B, 1, L, L, dtype=torch.bool)
            self._mask = torch.triu(ones, diagonal=1).to(device)

    @property
    def mask(self):
        return self._mask


class AttentionLayer(nn.Module):
    """Multi-head projection wrapper around an inner attention mechanism."""

    def __init__(self, attention, d_model, n_heads, d_keys=None, d_values=None):
        super(AttentionLayer, self).__init__()

        # Default per-head sizes split d_model evenly across heads.
        d_keys = d_keys or (d_model // n_heads)
        d_values = d_values or (d_model // n_heads)

        self.inner_attention = attention
        self.query_projection = nn.Linear(d_model, d_keys * n_heads)
        self.key_projection = nn.Linear(d_model, d_keys * n_heads)
        self.value_projection = nn.Linear(d_model, d_values * n_heads)
        self.out_projection = nn.Linear(d_values * n_heads, d_model)
        self.n_heads = n_heads

    def forward(self, queries, keys, values, attn_mask, tau=None, delta=None):
        batch, q_len, _ = queries.shape
        k_len = keys.shape[1]
        heads = self.n_heads

        # Project and split into heads: [B, L, H, d_head].
        q = self.query_projection(queries).view(batch, q_len, heads, -1)
        k = self.key_projection(keys).view(batch, k_len, heads, -1)
        v = self.value_projection(values).view(batch, k_len, heads, -1)

        context, attn = self.inner_attention(q, k, v, attn_mask, tau=tau, delta=delta)
        # Merge heads back and project to d_model.
        context = context.view(batch, q_len, -1)
        return self.out_projection(context), attn


class EncoderLayer(nn.Module):
    """Transformer encoder layer: self-attention + 1x1-conv feed-forward, post-norm."""

    def __init__(self, attention, d_model, d_ff=None, dropout=0.1, activation="relu"):
        super(EncoderLayer, self).__init__()
        # Conventional FFN width default: 4 * d_model.
        d_ff = d_ff or 4 * d_model
        self.attention = attention
        self.conv1 = nn.Conv1d(in_channels=d_model, out_channels=d_ff, kernel_size=1)
        self.conv2 = nn.Conv1d(in_channels=d_ff, out_channels=d_model, kernel_size=1)
        self.norm1 = nn.LayerNorm(d_model)
        self.norm2 = nn.LayerNorm(d_model)
        self.dropout = nn.Dropout(dropout)
        self.activation = F.relu if activation == "relu" else F.gelu

    def forward(self, x, attn_mask=None, tau=None, delta=None):
        # Self-attention with residual connection, then post-norm.
        attn_out, attn = self.attention(
            x, x, x, attn_mask=attn_mask, tau=tau, delta=delta
        )
        x = self.norm1(x + self.dropout(attn_out))

        # Position-wise feed-forward as two 1x1 convolutions over [B, D, L].
        hidden = self.dropout(self.activation(self.conv1(x.transpose(-1, 1))))
        hidden = self.dropout(self.conv2(hidden).transpose(-1, 1))

        return self.norm2(x + hidden), attn


class Encoder(nn.Module):
    """Stack of attention layers, optionally interleaved with conv layers."""

    def __init__(self, attn_layers, conv_layers=None, norm_layer=None):
        super(Encoder, self).__init__()
        self.attn_layers = nn.ModuleList(attn_layers)
        self.conv_layers = None if conv_layers is None else nn.ModuleList(conv_layers)
        self.norm = norm_layer

    def forward(self, x, attn_mask=None, tau=None, delta=None):
        # x: [B, L, D]
        attns = []
        if self.conv_layers is None:
            # Plain stack: every layer sees the same mask / tau / delta.
            for layer in self.attn_layers:
                x, attn = layer(x, attn_mask=attn_mask, tau=tau, delta=delta)
                attns.append(attn)
        else:
            # (attention, conv) pairs; `delta` only applies to the first pair.
            for idx, (attn_layer, conv_layer) in enumerate(
                zip(self.attn_layers, self.conv_layers)
            ):
                layer_delta = delta if idx == 0 else None
                x, attn = attn_layer(x, attn_mask=attn_mask, tau=tau, delta=layer_delta)
                x = conv_layer(x)
                attns.append(attn)
            # Final attention layer has no conv after it.
            x, attn = self.attn_layers[-1](x, tau=tau, delta=None)
            attns.append(attn)

        if self.norm is not None:
            x = self.norm(x)

        return x, attns


class FlattenHead(nn.Module):
    """Output head on top of the encoder representation.

    'pretrain' mode: flatten the last two dims and project nf -> model_dim.
    'rl' mode: flatten only (no projection), exposing raw features.
    """

    def __init__(self, model_dim, nf, dropout=0):
        super().__init__()
        self.model_dim = model_dim
        self.nf = nf
        self.dropout = nn.Dropout(dropout)
        self.flatten = nn.Flatten(-2)  # merge the last two dimensions
        self.linear = nn.Linear(nf, model_dim)
        self.mode = 'pretrain'

    def forward(self, x):
        # x: [..., d_model, patch_num] -> flattened to [..., d_model * patch_num].
        x = self.flatten(x)
        if self.mode == 'pretrain':
            x = self.linear(x)
        elif self.mode != 'rl':
            # FIX: an unrecognized mode used to silently return None,
            # which surfaced later as an obscure downstream error.
            raise ValueError(f"Unknown FlattenHead mode: {self.mode!r}")
        return self.dropout(x)

    def set_pretrain(self):
        """Enable the linear projection (forecasting / pretraining)."""
        self.mode = 'pretrain'

    def set_rl(self):
        """Skip the projection and expose flattened features for RL."""
        self.mode = 'rl'

class PatchTST(nn.Module):  # Overall model
    """PatchTST forecaster: patching + channel-independent Transformer encoder.

    Modes (see the set_* methods):
      - 'pretrain': forecasting head active, output [batch, window, n_vars].
      - 'rl':       flattened encoder features, output [batch, nf, n_vars].
      - 'baseline': additionally maps channels enc_in -> c_out with a linear head.
    """

    def __init__(self, config):
        super().__init__()
        self.config = config
        # FIX: the embedding width must equal the encoder width. The original
        # used config.model_dim here while the encoder layers were built with
        # config.d_model; any config where the two differed (e.g. 6 vs 64)
        # crashed at the first encoder matmul.
        self.model_dim = config.d_model
        self.patch_len = config.patch_len
        self.stride = config.stride
        self.padding = self.stride  # right-pad by one stride (config.padding would also work)
        self.dropout = config.dropout
        self.baseline_in = getattr(config, 'enc_in', None)
        self.baseline_out = getattr(config, 'c_out', None)

        self.patch_embedding = PatchEmbedding(
            self.model_dim, self.patch_len, self.stride, self.padding, self.dropout
        )
        self.encoder = Encoder(
            [
                EncoderLayer(
                    AttentionLayer(
                        FullAttention(
                            False,
                            config.factor,
                            attention_dropout=config.dropout,
                            output_attention=config.output_attention,
                        ),
                        config.d_model,
                        config.n_heads,
                    ),
                    config.d_model,
                    config.d_ff,
                    dropout=config.dropout,
                    activation=config.activation,
                )
                for _ in range(config.e_layers)
            ],
            norm_layer=torch.nn.LayerNorm(config.d_model),
        )
        # Flattened feature size per variable: d_model * patch_num
        # (patch_num includes one extra patch from the replication padding).
        self.head_nf = config.d_model * int(
            (config.seq_len - self.patch_len) / self.stride + 2
        )
        self.flatten_head = FlattenHead(config.window, self.head_nf)
        self.name = "PatchTST"
        self.mod = "pretrain"
        # FIX: only build the baseline head when both channel counts are known;
        # nn.Linear(None, None) raised at construction time otherwise.
        if self.baseline_in is not None and self.baseline_out is not None:
            self.baseline_head = nn.Linear(self.baseline_in, self.baseline_out)
        else:
            self.baseline_head = None

    def forward(self, x):
        """x: [batch, seq_len, n_vars] -> [batch, window, n_vars] in pretrain mode.

        NOTE(review): in 'pretrain' mode x.shape[1] must equal config.seq_len,
        since the flatten head's linear layer was sized from it.
        """
        patch_num = int((x.shape[1] - self.patch_len) / self.stride + 2)
        # --- instance normalization (RevIN-style, non-affine) ---
        n_var = x.shape[-1]
        means = x.mean(1, keepdim=True).detach()
        x = x - means
        stdev = torch.sqrt(torch.var(x, dim=1, keepdim=True, unbiased=False) + 1e-5)
        x /= stdev
        # --- patching + encoding (channels folded into the batch axis) ---
        x = x.permute(0, 2, 1)
        x = self.patch_embedding(x)
        x, attns = self.encoder(x)
        x = x.reshape(-1, n_var, patch_num, self.model_dim)
        x = x.permute(0, 1, 3, 2)  # [batch, n_var, model_dim, patch_num]
        x = self.flatten_head(x)   # [batch, n_var, nf] in rl mode
        x = x.permute(0, 2, 1)     # [batch, nf, n_var] in rl mode
        # --- de-normalization ---
        x *= stdev
        x += means
        if self.mod == "baseline":
            if self.baseline_head is None:
                raise RuntimeError(
                    "baseline mode requires config.enc_in and config.c_out"
                )
            x = self.baseline_head(x)
        return x

    def fit(self, train_data: pd.DataFrame):
        """Train the model on a DataFrame of time series (with standardization).

        :param train_data: input time series, one column per variable.
        :return: self, to allow chaining.
        """
        lags = self.config.seq_len
        predict_len = self.config.window  # forecast horizon

        # --- standardize the data ---
        self.scaler = StandardScaler()
        data = self.scaler.fit_transform(train_data.values)

        # --- build sliding-window samples ---
        xs = []
        ys = []
        for i in range(len(data) - lags - predict_len):
            xs.append(data[i : i + lags])
            ys.append(data[i + lags : i + lags + predict_len])

        # FIX: stack with numpy first — torch.tensor on a Python list of
        # ndarrays is extremely slow and warns in recent PyTorch versions.
        x = torch.tensor(np.asarray(xs), dtype=torch.float32)
        y = torch.tensor(np.asarray(ys), dtype=torch.float32)

        dataset = TensorDataset(x, y)
        dataloader = DataLoader(dataset, batch_size=256, shuffle=True)

        # --- training loop ---
        optimizer = torch.optim.Adam(self.parameters(), lr=1e-3)
        criterion = torch.nn.MSELoss()

        for epoch in range(10):
            # FIX: track the last batch loss explicitly so an empty dataloader
            # cannot hit an unbound `loss` in the print below.
            last_loss = None
            for batch_x, batch_y in dataloader:
                batch_x = batch_x.to(self.config.device)
                batch_y = batch_y.to(self.config.device)

                pred = self(batch_x)
                loss = criterion(pred, batch_y)

                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
                last_loss = loss.item()

            if last_loss is not None:
                print(f"Epoch {epoch+1}, Loss: {last_loss:.6f}")

        return self

    def set_pretrain(self):
        """Forecasting head active; forward returns [batch, window, n_vars]."""
        self.flatten_head.set_pretrain()
        self.mod = "pretrain"

    def set_rl(self):
        """Expose flattened encoder features for downstream RL use."""
        self.flatten_head.set_rl()
        self.mod = "rl"

    def set_baseline(self):
        """Apply the extra channel-mapping linear head after the forecast."""
        self.mod = "baseline"


class Config:
    """Default hyperparameters for PatchTST."""

    def __init__(self):
        # Overall embedding width used by every Transformer module.
        # FIX: must equal d_model — it was 6 while d_model was 64, which made
        # the patch-embedding output width disagree with the encoder input.
        self.model_dim = 64
        # Patch length: the series is split into patches of this many steps.
        self.patch_len = 16
        # Patch stride; stride < patch_len produces overlapping patches.
        self.stride = 16
        # Dropout probability (attention / feed-forward / embeddings).
        self.dropout = 0.1
        # Top-k sampling factor for ProbSparse attention (typically 5-10);
        # unused by FullAttention but kept for interface compatibility.
        self.factor = 5
        # Number of attention heads.
        self.n_heads = 8
        # Transformer model dimension (same quantity as model_dim).
        self.d_model = 64
        # Hidden size of the position-wise feed-forward network.
        self.d_ff = 128
        # Activation in the FFN (usually "relu" or "gelu").
        self.activation = "relu"
        # Number of stacked Transformer encoder layers.
        self.e_layers = 3
        # Length of the input (history) window, e.g. 128 time steps.
        self.seq_len = 128
        # Whether to return attention matrices (for visualization/analysis).
        self.output_attention = False
        # Forecast horizon: number of future steps to predict.
        self.window = 16
        # FIX: channel counts read by PatchTST via getattr — building the
        # model previously crashed (nn.Linear(None, None)) when these were
        # missing. 7 matches the ETTh1 feature columns used by load_data().
        self.enc_in = 7
        self.c_out = 7
        # FIX: device read by PatchTST.fit(); previously undefined here.
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")


def simple_test():
    """Smoke test: push one random batch through the model, print output shape."""
    cfg = Config()
    net = PatchTST(cfg)
    batch = torch.randn(2, 100, 5)
    print(net(batch).shape)


def overfitting_test():
    """Overfit the model on a single sine wave and plot prediction vs. truth."""
    cfg = Config()
    seq_len, pred_len = cfg.seq_len, cfg.window
    total_len = seq_len + pred_len

    # One sine-wave sample, split into history and target horizon.
    wave = np.sin(np.linspace(0, 20, total_len)).reshape(1, total_len, 1)
    x_input = torch.tensor(wave[:, :seq_len, :], dtype=torch.float32)
    y_true = torch.tensor(wave[:, seq_len:, :], dtype=torch.float32)

    # Model and optimizer.
    net = PatchTST(cfg)
    opt = torch.optim.Adam(net.parameters(), lr=0.001)
    criterion = nn.MSELoss()

    # Train until the single sample is memorized.
    for epoch in range(1000):
        net.train()
        opt.zero_grad()
        loss = criterion(net(x_input), y_true)
        loss.backward()
        opt.step()
        if epoch % 100 == 0:
            print(f"Epoch {epoch}, Loss: {loss.item():.6f}")

    # Visualize prediction against ground truth.
    net.eval()
    pred = net(x_input).detach().numpy().flatten()
    plt.plot(np.arange(seq_len), x_input[0, :, 0], label="Input")
    plt.plot(np.arange(seq_len, total_len), y_true[0, :, 0], label="True")
    plt.plot(np.arange(seq_len, total_len), pred, label="Predicted")
    plt.legend()
    plt.title("Overfit on a Sine Wave")
    plt.show()


def test():
    """Train/validate on ETTh1 and save the loss curves to ./model/."""
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print("Using device:", device)

    config = Config()
    model = PatchTST(config).to(device)
    optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
    loss_fn = nn.MSELoss()

    x_input, y_true = load_data(config.seq_len, config.window)
    x_input, y_true = x_input.to(torch.float32).to(device), y_true.to(torch.float32).to(
        device
    )

    # Chronological 80/20 train / validation split.
    num_samples = x_input.shape[0]
    split = int(num_samples * 0.8)
    x_train, y_train = x_input[:split], y_true[:split]
    x_val, y_val = x_input[split:], y_true[split:]

    losses = []
    val_losses = []

    for epoch in range(200):
        model.train()
        optimizer.zero_grad()
        output = model(x_train)
        loss = loss_fn(output, y_train)
        loss.backward()
        optimizer.step()
        losses.append(loss.item())

        # Validation pass (no gradients).
        model.eval()
        with torch.no_grad():
            val_output = model(x_val)
            val_loss = loss_fn(val_output, y_val).item()
            val_losses.append(val_loss)

        if epoch % 100 == 0:
            print(
                f"Epoch {epoch}, Train Loss: {loss.item():.6f}, Val Loss: {val_loss:.6f}"
            )

    # Plot and save the loss curves.
    plt.plot(losses, label="Train Loss")
    plt.plot(val_losses, label="Val Loss")
    plt.title(
        f"Overfit on ETTh1 Sample (seq_len={config.seq_len}, window={config.window})"
    )
    plt.xlabel("Epoch")
    plt.ylabel("Loss")
    plt.legend()

    # FIX: ensure the output directory exists — plt.savefig raises
    # FileNotFoundError when ./model/ is missing.
    os.makedirs("./model", exist_ok=True)
    filename = f"./model/loss_plot_seq{config.seq_len}_win{config.window}.png"
    plt.savefig(filename, dpi=300)  # save at high resolution
    plt.show()


def load_data(seq_len=100, pred_len=20):
    """Load ETTh1 and build sliding-window samples.

    Returns (x, y) tensors of shape
    [num_samples, seq_len, n_vars] and [num_samples, pred_len, n_vars].
    """
    frame = pd.read_csv("./testdata/ETT-small/ETTh1.csv").drop(columns=["date"])
    values = frame.values.astype(np.float32)

    total_len = seq_len + pred_len
    num_samples = len(values) - total_len
    xs = [values[i : i + seq_len] for i in range(num_samples)]
    ys = [values[i + seq_len : i + total_len] for i in range(num_samples)]

    return torch.tensor(np.stack(xs)), torch.tensor(np.stack(ys))


if __name__ == "__main__":
    # Make the script's directory importable when run directly.
    PROJECT_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__)))
    if PROJECT_ROOT not in sys.path:
        sys.path.insert(0, PROJECT_ROOT)

    start_time = time.time()
    # simple_test()
    # overfitting_test()
    test()
    # print(torch.cuda.is_available())
    elapsed = time.time() - start_time
    print(f"运行时间: {elapsed:.2f} 秒")
