import math
import numpy as np
import torch
import torch.nn.functional as F
from sklearn.preprocessing import StandardScaler
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
import utils.utils as utils


class PatchEmbedding(nn.Module):
    """Split a (batch, n_vars, seq_len) series into overlapping patches and
    embed each patch into ``model_dim``, adding a positional encoding.

    Output shape: (batch * n_vars, patch_num, model_dim) — the batch and
    variable dims are merged so each variable is embedded independently.
    """

    def __init__(self, model_dim, patch_len, stride, padding, dropout):
        super().__init__()
        self.model_dim = model_dim
        self.patch_len = patch_len
        self.stride = stride
        # Replicate the last time step `padding` times so the tail of the
        # sequence still yields a full patch window.
        self.padding = nn.ReplicationPad1d((0, padding))
        self.dropout = nn.Dropout(dropout)
        self.patch2model = nn.Linear(patch_len, model_dim)
        self.position_embedding = PositionalEmbedding(model_dim)
        # NOTE: the original also created an identical, unused
        # `padding_patch_layer`; it carried no parameters or buffers, so
        # removing it does not affect saved checkpoints.

    def forward(self, x):
        # x: (batch, n_vars, seq_len)
        x = self.padding(x)
        # -> (batch, n_vars, patch_num, patch_len)
        x = x.unfold(-1, self.patch_len, self.stride)
        # Merge batch and variable dims: (batch * n_vars, patch_num, patch_len)
        x = torch.reshape(x, (x.shape[0] * x.shape[1], x.shape[2], x.shape[3]))
        # Linear patch projection plus (broadcast) positional encoding.
        x = self.patch2model(x) + self.position_embedding(x)
        return self.dropout(x)


class PositionalEmbedding(nn.Module):
    """Sinusoidal positional encoding (Vaswani et al., 2017), precomputed once.

    ``forward`` returns the first ``x.size(1)`` positions with shape
    (1, seq_len, d_model), broadcastable over the batch dimension.
    """

    def __init__(self, d_model, max_len=5000):
        super().__init__()
        # Compute the positional encodings once in log space.
        pe = torch.zeros(max_len, d_model).float()
        # NOTE: the original set `pe.require_grad = False`, a typo for
        # `requires_grad` that silently did nothing; register_buffer already
        # excludes the tensor from gradient tracking, so the line is dropped.

        position = torch.arange(0, max_len).float().unsqueeze(1)
        div_term = (
            torch.arange(0, d_model, 2).float() * -(math.log(10000.0) / d_model)
        ).exp()

        pe[:, 0::2] = torch.sin(position * div_term)  # even channels: sine
        pe[:, 1::2] = torch.cos(position * div_term)  # odd channels: cosine

        pe = pe.unsqueeze(0)
        # Buffer: moves with the module across devices, saved in state_dict,
        # never updated by the optimizer.
        self.register_buffer("pe", pe)

    def forward(self, x):
        return self.pe[:, : x.size(1)]


class FullAttention(nn.Module):
    """Scaled dot-product attention over multi-head inputs.

    Expects queries/keys/values shaped (batch, length, heads, dim) and
    returns a (batch, length, heads, dim) context tensor plus, optionally,
    the attention weights.
    """

    def __init__(
        self,
        mask_flag=True,
        factor=5,
        scale=None,
        attention_dropout=0.1,
        output_attention=False,
    ):
        super().__init__()
        # `factor` is accepted for interface compatibility but unused here.
        self.scale = scale
        self.mask_flag = mask_flag
        self.output_attention = output_attention
        self.dropout = nn.Dropout(attention_dropout)

    def forward(self, queries, keys, values, attn_mask, tau=None, delta=None):
        B, L, H, E = queries.shape
        # Default scale is 1/sqrt(head_dim) when none was configured.
        scale = self.scale or 1.0 / math.sqrt(E)

        attn_scores = torch.einsum("blhe,bshe->bhls", queries, keys)

        if self.mask_flag:
            mask = attn_mask
            if mask is None:
                # Causal masking: disallow attending to future positions.
                mask = TriangularCausalMask(B, L, device=queries.device)
            attn_scores.masked_fill_(mask.mask, -np.inf)

        weights = self.dropout(torch.softmax(scale * attn_scores, dim=-1))
        context = torch.einsum("bhls,bshd->blhd", weights, values)

        if self.output_attention:
            return context.contiguous(), weights
        return context.contiguous(), None


class TriangularCausalMask:
    """Boolean causal mask of shape (B, 1, L, L).

    Entries above the main diagonal are True (future positions to be
    masked out); the diagonal and below are False.
    """

    def __init__(self, B, L, device="cpu"):
        with torch.no_grad():
            ones = torch.ones([B, 1, L, L], dtype=torch.bool)
            self._mask = torch.triu(ones, diagonal=1).to(device)

    @property
    def mask(self):
        # Read-only view of the precomputed mask tensor.
        return self._mask


class AttentionLayer(nn.Module):
    """Multi-head wrapper: project inputs to per-head Q/K/V, delegate to an
    inner attention module, then project the merged heads back to d_model."""

    def __init__(self, attention, d_model, n_heads, d_keys=None, d_values=None):
        super().__init__()

        # Default per-head dims split d_model evenly across heads.
        d_keys = d_keys or (d_model // n_heads)
        d_values = d_values or (d_model // n_heads)

        self.inner_attention = attention
        self.query_projection = nn.Linear(d_model, d_keys * n_heads)
        self.key_projection = nn.Linear(d_model, d_keys * n_heads)
        self.value_projection = nn.Linear(d_model, d_values * n_heads)
        self.out_projection = nn.Linear(d_values * n_heads, d_model)
        self.n_heads = n_heads

    def forward(self, queries, keys, values, attn_mask, tau=None, delta=None):
        batch, q_len, _ = queries.shape
        k_len = keys.shape[1]
        heads = self.n_heads

        # Split the projected features into (batch, length, heads, head_dim).
        q = self.query_projection(queries).view(batch, q_len, heads, -1)
        k = self.key_projection(keys).view(batch, k_len, heads, -1)
        v = self.value_projection(values).view(batch, k_len, heads, -1)

        context, attn = self.inner_attention(q, k, v, attn_mask, tau=tau, delta=delta)
        # Merge heads back: (batch, q_len, heads * head_dim).
        context = context.view(batch, q_len, -1)

        return self.out_projection(context), attn


class EncoderLayer(nn.Module):
    def __init__(self, attention, d_model, d_ff=None, dropout=0.1, activation="relu"):
        super(EncoderLayer, self).__init__()
        d_ff = d_ff or 4 * d_model
        self.attention = attention
        self.conv1 = nn.Conv1d(in_channels=d_model, out_channels=d_ff, kernel_size=1)
        self.conv2 = nn.Conv1d(in_channels=d_ff, out_channels=d_model, kernel_size=1)
        self.norm1 = nn.LayerNorm(d_model)
        self.norm2 = nn.LayerNorm(d_model)
        self.dropout = nn.Dropout(dropout)
        self.activation = F.relu if activation == "relu" else F.gelu

    def forward(self, x, attn_mask=None, tau=None, delta=None):
        new_x, attn = self.attention(x, x, x, attn_mask=attn_mask, tau=tau, delta=delta)
        x = x + self.dropout(new_x)

        y = x = self.norm1(x)
        y = self.dropout(self.activation(self.conv1(y.transpose(-1, 1))))
        y = self.dropout(self.conv2(y).transpose(-1, 1))

        return self.norm2(x + y), attn


class Encoder(nn.Module):
    """Stack of attention layers, optionally interleaved with conv
    (distilling) layers, followed by an optional final normalization."""

    def __init__(self, attn_layers, conv_layers=None, norm_layer=None):
        super().__init__()
        self.attn_layers = nn.ModuleList(attn_layers)
        if conv_layers is None:
            self.conv_layers = None
        else:
            self.conv_layers = nn.ModuleList(conv_layers)
        self.norm = norm_layer

    def forward(self, x, attn_mask=None, tau=None, delta=None):
        # x: [B, L, D]
        attns = []
        if self.conv_layers is None:
            # Plain stack: every attention layer sees the same mask/tau/delta.
            for layer in self.attn_layers:
                x, attn = layer(x, attn_mask=attn_mask, tau=tau, delta=delta)
                attns.append(attn)
        else:
            # Interleaved: each paired attention layer is followed by a conv
            # layer; `delta` is only passed to the first attention layer. The
            # final (unpaired) attention layer runs afterwards without a mask.
            for idx, (attn_layer, conv_layer) in enumerate(
                zip(self.attn_layers, self.conv_layers)
            ):
                x, attn = attn_layer(
                    x,
                    attn_mask=attn_mask,
                    tau=tau,
                    delta=delta if idx == 0 else None,
                )
                x = conv_layer(x)
                attns.append(attn)
            x, attn = self.attn_layers[-1](x, tau=tau, delta=None)
            attns.append(attn)

        if self.norm is not None:
            x = self.norm(x)

        return x, attns


class FlattenHead(nn.Module):
    """Output head that flattens the last two input dims.

    In "pretrain" mode the flattened features pass through a linear layer
    and sigmoid; in "rl" mode they pass through unchanged. Dropout is
    applied last in both modes.
    """

    def __init__(self, model_dim, nf, dropout=0):
        super().__init__()
        self.model_dim = model_dim
        self.nf = nf
        self.dropout = nn.Dropout(dropout)
        self.flatten = nn.Flatten(-2)
        self.linear = nn.Linear(nf, model_dim)
        # Default mode; switched via set_pretrain()/set_rl().
        self.mode = "pretrain"

    def forward(self, x):
        out = self.flatten(x)
        if self.mode == "pretrain":
            out = torch.sigmoid(self.linear(out))
        return self.dropout(out)

    def set_pretrain(self):
        self.mode = "pretrain"

    def set_rl(self):
        self.mode = "rl"


class PatchTST(nn.Module):  # overall model
    """PatchTST time-series model: patch embedding + Transformer encoder.

    Modes (set via set_pretrain()/set_rl()/set_deterministic_train()):
      - "pretrain": forward returns one classification logit per sample
        through `cls_head` (used by `fit` for binary trend classification).
      - "rl": forward returns the flattened per-variable features.
    """

    def __init__(self, config):
        super().__init__()
        self.config = config
        self.model_dim = config.model_dim
        self.patch_len = config.patch_len
        self.stride = config.stride
        self.padding = self.stride  # could also be taken from config.padding
        self.dropout = config.dropout

        self.patch_embedding = PatchEmbedding(
            self.model_dim, self.patch_len, self.stride, self.padding, self.dropout
        )
        # Non-causal (mask_flag=False) multi-head attention encoder stack.
        self.encoder = Encoder(
            [
                EncoderLayer(
                    AttentionLayer(
                        FullAttention(
                            False,
                            config.factor,
                            attention_dropout=config.dropout,
                            output_attention=config.output_attention,
                        ),
                        config.d_model,
                        config.n_heads,
                    ),
                    config.d_model,
                    config.d_ff,
                    dropout=config.dropout,
                    activation=config.activation,
                )
                for l in range(config.e_layers)
            ],
            norm_layer=torch.nn.LayerNorm(config.d_model),
        )
        # Flattened per-variable feature size: d_model * patch_num, where
        # patch_num = (seq_len - patch_len)/stride + 2 (the +2 accounts for
        # the replication padding added in PatchEmbedding).
        self.head_nf = config.d_model * int(
            (config.seq_len - self.patch_len) / self.stride + 2
        )
        self.flatten_head = FlattenHead(config.window, self.head_nf)
        self.cls_head = nn.Linear(config.window * (config.tech_num + 1), 1)
        self.name = "PatchTST"
        self.mode = None
        self.pretrain_flatten = nn.Flatten(-2)
        # NOTE(review): this overwrites the float `self.dropout = config.dropout`
        # set above (only used to build PatchEmbedding) with a Dropout module.
        self.dropout = nn.Dropout(p=0.1)

    def forward(self, x):
        """Forward pass.

        x: (batch, seq_len, n_vars). Returns a (batch, 1) logit in
        "pretrain" mode, otherwise (batch, nf, n_vars).
        """
        # ------
        patch_num = int((x.shape[1] - self.patch_len) / self.stride + 2)
        n_var = x.shape[-1]
        # ------ per-instance normalization (currently disabled)
        # means = x.mean(1, keepdim=True).detach()
        # x = x - means
        # stdev = torch.sqrt(torch.var(x, dim=1, keepdim=True, unbiased=False) + 1e-5)
        # x /= stdev
        # ------
        # (batch, n_vars, seq_len) for patching along the time axis.
        x = x.permute(0, 2, 1)
        x = self.patch_embedding(x)
        x, attns = self.encoder(x)
        # Un-merge the batch/variable dims folded together by PatchEmbedding.
        x = x.reshape(-1, n_var, patch_num, self.model_dim)
        x = x.permute(0, 1, 3, 2)  # [batch ,n_var, self.model_dim ,patch_num]
        x = self.flatten_head(
            x
        )  # [batch ,n_var, nf] in rl ,[batch ,n_var,horizen] in pretrain
        x = x.permute(
            0, 2, 1
        )  # [batch , nf,n_var] in rl,  [batch , horizen,n_var] in pretrain

        if self.mode == "pretrain":
            x = self.pretrain_flatten(x)  # [batch , horizen*n_var]
            x = self.dropout(x)
            x = self.cls_head(x)  # shape: (batch_size, 1)
            return x
        # ------ de-normalization (currently disabled)
        # x *= stdev
        # x += means
        return x  # [batch , nf,n_var]

    def fit(self, train_data):
        """
        Train the PatchTST classifier to predict each future's trend
        direction (up/down) over the next `trend_horizon` steps.

        :param train_data: 3D time-series ndarray of shape
            (future_num, seq_len, num_features).
        """
        future_num, seq_len, num_features = train_data.shape
        lags = self.config.seq_len
        trend_horizon = getattr(self.config, "trend_horizon", 7)
        trend_threshold = getattr(self.config, "trend_threshold", 0.0004)
        margin = getattr(self.config, "margin", 0.001)
        # Chronological split: first 80% of each series for training,
        # last 20% for validation.
        val_ratio = 0.2
        split_idx = int(seq_len * (1 - val_ratio))

        train_x, train_y = [], []
        val_x, val_y = [], []

        for future_idx in range(future_num):
            raw_data = train_data[future_idx]  # shape: (seq_len, num_features)
            # NOTE(review): presumably differences the first 5 (price) columns
            # — confirm against utils.multi_diff.
            diff_data = utils.multi_diff(raw_data.copy(), num_price_cols=5)

            # --- build training samples ---
            for i in range(split_idx - lags - trend_horizon):
                x_seq = diff_data[i : i + lags]  # differenced input window
                x_raw = raw_data[i : i + lags]  # raw window (optional, for debugging)

                # Labels are computed from the RAW (undifferenced) series.
                future_prices = raw_data[i + lags : i + lags + trend_horizon, 0]
                price_now = raw_data[i + lags - 1, 0]
                mean_future_price = np.mean(future_prices)
                trend_strength = (mean_future_price - price_now) / price_now

                # Drop near-flat samples below the trend threshold.
                if abs(trend_strength) < trend_threshold:
                    continue

                # Map trend strength into a soft [0, 1] label centered at 0.5.
                label = (trend_strength + margin) / (2 * margin)
                label = np.clip(label, 0.0, 1.0)

                # Augmentations applied to training inputs only.
                x_seq = utils.normalize_features(x_seq)
                x_seq = utils.time_mask(x_seq, p=0.15)
                # x_seq = utils.time_shift(x_seq)
                x_seq = utils.time_warp(x_seq, sigma=0.2, knot=6)
                x_seq = utils.add_jittering(x_seq, sigma=0.01)

                train_x.append(x_seq)
                train_y.append([label])

            # --- build validation samples (no augmentation) ---
            for i in range(split_idx, seq_len - lags - trend_horizon):
                x_seq = diff_data[i : i + lags]
                future_prices = raw_data[i + lags : i + lags + trend_horizon, 0]
                price_now = raw_data[i + lags - 1, 0]
                mean_future_price = np.mean(future_prices)
                trend_strength = (mean_future_price - price_now) / price_now

                if abs(trend_strength) < trend_threshold:
                    continue

                label = (trend_strength + margin) / (2 * margin)
                label = np.clip(label, 0.0, 1.0)

                x_seq = utils.normalize_features(x_seq)
                val_x.append(x_seq)
                val_y.append([label])

        # Convert to tensors.
        x_tensor = torch.tensor(train_x, dtype=torch.float32)
        y_tensor = torch.tensor(train_y, dtype=torch.float32)
        x_val = torch.tensor(val_x, dtype=torch.float32)
        y_val = torch.tensor(val_y, dtype=torch.float32)

        # Label distribution statistics.
        num_up = (y_tensor >= 0.5).sum().item()
        num_down = (y_tensor < 0.5).sum().item()
        print(
            f"[Train Label Distribution] Up (1): {int(num_up)}, Down (0): {int(num_down)}"
        )

        dataset = TensorDataset(x_tensor, y_tensor)
        dataloader = DataLoader(dataset, batch_size=256, shuffle=True)

        optimizer = torch.optim.Adam(self.parameters(), lr=1e-3, weight_decay=1e-4)
        # BCEWithLogitsLoss: `cls_head` output is a raw logit (the sigmoid
        # inside FlattenHead is a mid-network activation, not the output).
        criterion = torch.nn.BCEWithLogitsLoss()

        for epoch in range(100):
            self.train()
            epoch_loss = 0.0
            for batch_x, batch_y in dataloader:
                batch_x = batch_x.to(self.config.device)
                batch_y = batch_y.to(self.config.device)

                logits = self(batch_x)
                loss = criterion(logits, batch_y)

                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
                epoch_loss += loss.item()

            # Every 10 epochs: report loss and evaluate accuracy on both sets.
            if (epoch + 1) % 10 == 0:
                print(f"Epoch {epoch + 1}, Loss: {epoch_loss / len(dataloader):.6f}")
                self.eval()
                with torch.no_grad():

                    def evaluate(x_data, y_data, name="Set"):
                        # Binarize soft labels at 0.5 and report overall and
                        # per-class (up/down) accuracy.
                        logits = self(x_data.to(self.config.device))
                        probs = torch.sigmoid(logits)
                        preds = (probs > 0.5).float()
                        labels_bin = (y_data.to(self.config.device) >= 0.5).float()

                        correct = (preds == labels_bin).float().mean().item()
                        up_mask = labels_bin == 1
                        down_mask = labels_bin == 0
                        up_acc = (
                            (preds[up_mask] == 1).float().mean().item()
                            if up_mask.any()
                            else 0.0
                        )
                        down_acc = (
                            (preds[down_mask] == 0).float().mean().item()
                            if down_mask.any()
                            else 0.0
                        )

                        print(f"[{name}] Accuracy: {correct:.4f}")
                        print(f"  ↑ Up:   {up_acc:.4f}")
                        print(f"  ↓ Down: {down_acc:.4f}")

                    evaluate(x_val, y_val, "Validation")
                    evaluate(x_tensor, y_tensor, "Train")

        return self

    def set_pretrain(self):
        # Switch model and head to pretraining (classification) mode.
        self.flatten_head.set_pretrain()
        self.mode = "pretrain"

    def set_rl(self):
        # Switch model and head to RL feature-extraction mode.
        self.flatten_head.set_rl()
        self.mode = "rl"

    def set_deterministic_train(self):
        # Head behaves as in pretraining, but the model-level mode flag
        # differs, so forward() returns features instead of logits.
        self.flatten_head.set_pretrain()
        self.mode = "deterministic_train"
