from ts_benchmark.baselines.duet.models.duet_model import DUETModel
from ts_benchmark.baselines.deep_forecasting_model_base import DeepForecastingModelBase
# model hyper params
# Default hyper-parameters for the DUET adapter; passed to
# DeepForecastingModelBase and overridable via DUET(**kwargs).
# NOTE(review): key names follow the common Informer/Autoformer convention
# (enc_in/dec_in/c_out = channel counts, d_model/d_ff = transformer widths,
# e_layers/d_layers = encoder/decoder depth) — verify against DUETModel.
MODEL_HYPER_PARAMS = {
    "enc_in": 1,
    "dec_in": 1,
    "c_out": 1,
    "e_layers": 2,
    "d_layers": 1,
    "d_model": 512,
    "d_ff": 2048,
    "hidden_size": 256,
    "freq": "h",
    "factor": 1,
    "n_heads": 8,
    "seg_len": 6,
    "win_size": 2,
    "activation": "gelu",
    "output_attention": 0,
    "patch_len": 16,
    "stride": 8,
    "period_len": 4,
    "dropout": 0.2,
    "fc_dropout": 0.2,
    "moving_avg": 25,
    # --- training-loop settings (consumed by the base-class training loop) ---
    "batch_size": 256,
    "lradj": "type3",
    "lr": 0.02,
    "num_epochs": 100,
    "num_workers": 0,
    "loss": "huber",
    "patience": 10,
    # --- mixture-of-experts / gating settings (presumably consumed by DUETModel) ---
    "num_experts": 4,
    "noisy_gating": True,
    "k": 1,
    "CI": True,
    "parallel_strategy": "DP"
}

import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
from torch.utils.data import Dataset, DataLoader

# Data preprocessing module (supports multivariate time series)
class TimeSeriesDataset(Dataset):
    """Sliding-window dataset over a z-score-normalized multivariate series.

    Args:
        data: Array-like of shape [T, D] (time steps x features).
        window_size: Length of the input window ``x``.
        horizon: Number of future steps in the target ``y``.

    Each item is a pair ``(x, y)`` of normalized tensors with shapes
    ``[window_size, D]`` and ``[horizon, D]``.
    """

    def __init__(self, data, window_size=96, horizon=24):
        self.data = torch.FloatTensor(data)
        self.window_size = window_size
        self.horizon = horizon
        self.mean = self.data.mean(0)
        # BUG FIX: clamp the std away from zero so constant feature columns
        # do not produce division-by-zero NaNs in the normalized data.
        self.std = self.data.std(0).clamp_min(1e-8)
        self.norm_data = (self.data - self.mean) / self.std

    def __len__(self):
        # BUG FIX (off-by-one): the last valid start index is
        # T - window_size - horizon, so there are T - window_size - horizon + 1
        # samples; the original dropped the final one.
        return len(self.data) - self.window_size - self.horizon + 1

    def __getitem__(self, idx):
        x = self.norm_data[idx:idx + self.window_size]
        y = self.norm_data[idx + self.window_size:idx + self.window_size + self.horizon]
        return x, y

# Dual-path forecasting model (resolves dimension-matching issues)
class DualPathForecaster(nn.Module):
    """Two-branch forecaster: a Transformer encoder (high-frequency path)
    plus a bidirectional LSTM (low-frequency path).

    The last time step of each branch is concatenated and projected back to
    the input feature dimension, yielding a one-step forecast of shape
    ``[B, 1, feat_dim]``.

    Args:
        feat_dim: Number of input (and output) features per time step.
            Must be divisible by ``nhead`` (here 1, so any value works).
        hidden_dim: Hidden size of the LSTM (per direction).
    """

    def __init__(self, feat_dim, hidden_dim=256):
        super().__init__()
        # High-frequency path: Transformer encoder.
        # BUG FIX: batch_first=True so the [B, L, D] input is interpreted
        # correctly; the default (batch_first=False) would treat dim 0 as the
        # sequence axis, silently mixing batch and time (the LSTM branch
        # already uses batch_first=True).
        self.trans_encoder = nn.TransformerEncoder(
            encoder_layer=nn.TransformerEncoderLayer(
                d_model=feat_dim,
                nhead=1,
                dim_feedforward=1024,
                batch_first=True,
            ),
            num_layers=4,
        )
        # Low-frequency path: bidirectional LSTM.
        self.lstm = nn.LSTM(
            input_size=feat_dim,
            hidden_size=hidden_dim,
            num_layers=2,
            bidirectional=True,
            batch_first=True,
        )
        # Dynamic feature fusion: concat of both branches -> feat_dim output.
        self.fusion = nn.Sequential(
            nn.Linear(feat_dim + 2 * hidden_dim, 512),
            nn.GELU(),
            nn.Linear(512, feat_dim),
        )

    def forward(self, x):
        # x: [B, L, D]
        h_trans = self.trans_encoder(x)  # [B, L, D]
        h_lstm, _ = self.lstm(x)         # [B, L, 2H]

        # Fuse the final time step of each branch and predict one step.
        combined = torch.cat([h_trans[:, -1, :], h_lstm[:, -1, :]], dim=1)
        return self.fusion(combined).unsqueeze(1)  # [B, 1, D]

# Reinforcement-learning policy network (with orthogonal initialization)
class RLAgent(nn.Module):
    """Actor-critic pair of MLP heads over a shared state vector.

    Args:
        state_dim: Dimensionality of the input state.
        action_dim: Number of policy outputs (default 3).

    forward(state) returns ``(policy_logits, value)`` with shapes
    ``[B, action_dim]`` and ``[B, 1]``.
    """

    def __init__(self, state_dim, action_dim=3):
        super().__init__()
        # Policy head (actor) and value head (critic) share the same topology.
        self.policy = self._head(state_dim, action_dim)
        self.value = self._head(state_dim, 1)
        # Orthogonal initialization for every weight matrix in both heads.
        for param in list(self.policy.parameters()) + list(self.value.parameters()):
            if param.dim() >= 2:
                nn.init.orthogonal_(param)

    @staticmethod
    def _head(in_dim, out_dim):
        # Two-layer MLP: Linear -> LayerNorm -> GELU -> Linear.
        return nn.Sequential(
            nn.Linear(in_dim, 256),
            nn.LayerNorm(256),
            nn.GELU(),
            nn.Linear(256, out_dim),
        )

    def forward(self, state):
        return self.policy(state), self.value(state)

# Full training procedure (with orthogonal gradient projection)
class UnifiedTrainer(nn.Module):
    """Wraps the forecaster and RL agent into one module with a joint loss.

    forward(x, y, config) returns ``(pred, total_loss)``. No backward()/step()
    happens here (those calls are commented out below); presumably the
    surrounding training loop backpropagates the returned loss — TODO confirm
    against the DeepForecastingModelBase training loop.
    """

    def __init__(self, forecaster, agent):
        super().__init__()
        self.forecaster = forecaster  # e.g. DualPathForecaster
        self.agent = agent            # e.g. RLAgent
        self.criterion = nn.HuberLoss()

    def forward(self, x, y, config):
        # NOTE(review): fresh optimizers are built on EVERY forward call and
        # never stepped — only zero_grad() is used below. The `config`
        # argument is unused here. Likely leftover from a standalone loop.
        opt_forecast = optim.AdamW(self.forecaster.parameters(), lr=1e-4)
        opt_agent = optim.Adam(self.agent.parameters(), lr=3e-5)

        total_loss = 0.0

        # Forecast path: one-step prediction tiled flat across the horizon.
        opt_forecast.zero_grad()
        pred = self.forecaster(x)
        # Forecaster emits a single step [B, 1, D]; repeat over the horizon.
        pred = pred.repeat(1, y.shape[1], 1)  # [B, H, D]
        loss_pred = self.criterion(pred, y)

        # RL policy update: state is the horizon-mean of the detached forecast.
        opt_agent.zero_grad()
        with torch.no_grad():
            state = pred.detach().mean(dim=1)  # [B, D]
        policy, value = self.agent(state)

        # Triple reward (prediction / smoothness / exploration terms).
        reward = self._compute_reward(pred, y, policy)

        # Policy-gradient style loss.
        # NOTE(review): `advantage` is not detached, so gradients also flow
        # through the value head via this term — confirm this is intended.
        advantage = reward - value.squeeze()
        loss_agent = -(policy * advantage.unsqueeze(1)).mean()

        # Joint loss; backpropagation is intentionally left to the caller.
        total_loss = loss_pred + 0.1 * loss_agent
        # total_loss.backward()

        # Orthogonal gradient projection.
        # NOTE(review): this runs before any backward() in this method, so
        # param.grad is usually None here and the projection is a no-op
        # unless gradients survive from a previous step.
        self._orthogonal_gradient_projection()

        # opt_forecast.step()
        # opt_agent.step()
        return (pred, total_loss)

    def _compute_reward(self, pred, true, policy):
        """Per-sample reward mixing three terms with weights 0.7/0.2/0.1."""
        # Prediction-consistency reward: higher when per-sample MAE is lower.
        mae = torch.abs(pred - true).mean(dim=(1,2))
        r_pred = torch.exp(-mae)

        # Policy-smoothness reward: penalizes large action magnitudes.
        action_diff = torch.norm(policy.detach(), dim=1)
        r_smooth = torch.exp(-action_diff)

        # Exploration reward: log of the batch-wise std of the policy
        # (a scalar, broadcast over the batch in the weighted sum below).
        entropy = torch.std(policy, dim=0).mean()
        r_explore = torch.log(entropy + 1e-6)

        return 0.7*r_pred + 0.2*r_smooth + 0.1*r_explore

    def _orthogonal_gradient_projection(self):
        """Project each forecaster gradient orthogonal to its parameter.

        Removes the component of the gradient parallel to the parameter
        vector: g -= (g·p / |p|²) p.
        """
        for param in self.forecaster.parameters():
            if param.grad is not None:
                grad_flat = param.grad.view(-1)
                param_flat = param.data.view(-1)
                projection = torch.dot(grad_flat, param_flat) / torch.norm(param_flat)**2
                param.grad -= projection * param_flat.view_as(param.grad)


class DUET(DeepForecastingModelBase):
    """
    DUET adapter class.

    Attributes:
        model_name (str): Name of the model for identification purposes.
        _init_model: Builds and returns the model instance used for training.
        _process: Executes the model's forward pass and returns the output.
    """

    def __init__(self, **kwargs):
        super(DUET, self).__init__(MODEL_HYPER_PARAMS, **kwargs)

    @property
    def model_name(self):
        return "DUET"

    def _init_model(self):
        # NOTE(review): this bypasses the original DUETModel(self.config)
        # path (which was left as unreachable dead code after the return and
        # has been removed) in favor of the experimental dual-path + RL
        # trainer defined above. Confirm the override is intentional.
        # TODO(review): dim is hard-coded to 7; it presumably should come
        # from the config (e.g. self.config.enc_in) — verify against callers.
        dim = 7
        forecaster = DualPathForecaster(feat_dim=dim)
        agent = RLAgent(state_dim=dim)
        return UnifiedTrainer(forecaster, agent)

    def _process(self, input, target, input_mark, target_mark):
        # The wrapped model returns (prediction, loss); the loss is only
        # surfaced during training, matching the base-class contract that
        # reads "additional_loss" from the returned dict.
        output, loss_importance = self.model(input, target, self.config)
        out_loss = {"output": output}
        if self.model.training:
            out_loss["additional_loss"] = loss_importance
        return out_loss
