import torch
import torch.nn as nn
import numpy as np
from tqdm import tqdm
import time

# Select the GPU when one is available, otherwise fall back to the CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

import torch
import torch.nn as nn

class FCNN(nn.Module):
    """
    Fully-connected network for regression with multiple condition inputs.

    Structure:
    - separate embedding layers for each condition (scalar time,
      interior-ballistics ``b_condition``, attribute ``a_condition``)
    - a residual backbone of Linear + Dropout + LayerNorm layers
    - an output head that re-injects the condition embeddings (bypass
      connection) before projecting back to ``x_size``
    """

    def __init__(self, hidden_sizes, x_size, time_embed_size,
                 b_condition_size, b_condition_embed_size,
                 a_condition_size, a_condition_embed_size):
        """
        Initialize the FCNN model.

        Args:
            hidden_sizes: list with the width of each hidden layer
            x_size: dimensionality of the input feature vector x
            time_embed_size: embedding size for the scalar time condition
            b_condition_size: raw size of the interior-ballistics condition
                (not used directly here; kept for interface compatibility)
            b_condition_embed_size: embedding size for the b_condition
            a_condition_size: raw size of the attribute condition
            a_condition_embed_size: embedding size for the a_condition
        """
        super().__init__()
        self.hidden_sizes = hidden_sizes  # widths of the backbone layers

        # ========== condition embedding layers ==========
        # scalar time -> high-dimensional embedding
        self.time_embed = EmbedFC(1, time_embed_size)
        # interior-ballistics (time, pressure) sequence -> embedding
        self.b_condition_embed = TimePressureEmbedder(b_condition_embed_size)
        # attribute vector -> embedding
        self.a_condition_embed = EmbedFC(a_condition_size, a_condition_embed_size)

        # ========== derived sizes ==========
        # backbone input = x plus all condition embeddings
        input_size = x_size + time_embed_size + b_condition_embed_size + a_condition_embed_size
        # bypass size = condition embeddings only (re-used by the output head)
        bypass_size = time_embed_size + b_condition_embed_size + a_condition_embed_size

        # ========== backbone layers ==========
        self.layers = nn.ModuleList()  # linear layers
        self.norms = nn.ModuleList()   # matching layer norms

        # first (input) layer
        self.layers.append(nn.Linear(input_size, hidden_sizes[0]))
        self.norms.append(nn.LayerNorm(hidden_sizes[0]))

        # hidden layers
        for i in range(1, len(hidden_sizes)):
            self.layers.append(nn.Linear(hidden_sizes[i-1], hidden_sizes[i]))
            self.norms.append(nn.LayerNorm(hidden_sizes[i]))

        # Projection layers for the residual shortcuts.
        # BUGFIX: the original code built a brand-new ``nn.Linear`` inside
        # ``forward`` whenever consecutive widths differed, which (a) used
        # fresh random, untrained weights on every call and (b) allocated
        # the layer on the CPU regardless of the model's device (crashing
        # on GPU inputs).  Registering the projections here fixes both.
        self.shortcut_projs = nn.ModuleList()
        for i in range(1, len(hidden_sizes)):
            if hidden_sizes[i-1] != hidden_sizes[i]:
                self.shortcut_projs.append(
                    nn.Linear(hidden_sizes[i-1], hidden_sizes[i]))
            else:
                # matching widths: identity shortcut (no parameters)
                self.shortcut_projs.append(nn.Identity())

        # output head (bypass design: merges backbone output with conditions)
        self.output_layer = nn.Sequential(
            nn.Linear(hidden_sizes[-1] + bypass_size, hidden_sizes[-1]),
            nn.LayerNorm(hidden_sizes[-1]),
            nn.LeakyReLU(0.01),
            nn.Linear(hidden_sizes[-1], x_size)  # final projection to x_size
        )

        # ========== activation & dropout ==========
        self.leaky_relu = nn.LeakyReLU(0.01)  # negative slope 0.01
        self.dropout = nn.Dropout(0.05)       # 5% dropout

        # initialize weights
        self._init_weights()

    def _init_weights(self):
        """Apply Kaiming initialization to the backbone and shortcut layers."""
        for layer in list(self.layers) + list(self.shortcut_projs):
            if isinstance(layer, nn.Linear):
                # Kaiming init matched to the LeakyReLU activation
                nn.init.kaiming_normal_(layer.weight, mode='fan_in',
                                        nonlinearity='leaky_relu', a=0.01)
                nn.init.constant_(layer.bias, 0)  # zero bias

    def forward(self, x, time, b_condition, a_condition, context_mask):
        """
        Forward pass.

        Args:
            x: input features [batch_size, x_size]
            time: time condition [batch_size, 1]
            b_condition: interior-ballistics condition [batch_size, b_condition_size]
            a_condition: attribute condition [batch_size, a_condition_size]
            context_mask: context mask [batch_size, 1]; 1 zeroes the
                (b, a) conditions (classifier-free guidance dropout)

        Returns:
            prediction [batch_size, x_size]
        """
        # ========== condition embeddings ==========
        time_embed = self.time_embed(time)                  # time embedding
        b_cond_embed = self.b_condition_embed(b_condition)  # ballistics embedding
        a_cond_embed = self.a_condition_embed(a_condition)  # attribute embedding

        # ========== merge conditions ==========
        condition = torch.cat([b_cond_embed, a_cond_embed], dim=1)
        # apply the context mask (condition dropout)
        condition = condition * (1.0 - context_mask)

        # ========== initial input ==========
        # concatenate input features with all conditions
        x = torch.cat([x, time_embed, condition], dim=1)

        # ========== backbone forward ==========
        for i, layer in enumerate(self.layers):
            # prepare the residual shortcut (from the second layer onwards),
            # projecting when the widths differ (Identity otherwise)
            if i > 0:
                shortcut = self.shortcut_projs[i-1](x)

            x = layer(x)           # linear transform
            x = self.dropout(x)    # dropout
            x = self.norms[i](x)   # layer norm

            # residual connection (from the second layer onwards)
            if i > 0:
                x = x + shortcut

            x = self.leaky_relu(x)  # activation

        # ========== output head ==========
        # bypass connection: merge backbone output with the conditions again
        x = torch.cat([x, time_embed, condition], dim=1)
        return self.output_layer(x)


class EmbedFC(nn.Module):
    """Fully-connected embedding block: Linear -> LayerNorm -> LeakyReLU."""

    def __init__(self, input_dim, emb_dim):
        """
        Args:
            input_dim: size of each input sample (inputs are flattened to it)
            emb_dim: size of the produced embedding
        """
        super().__init__()
        self.input_dim = input_dim
        self.model = nn.Sequential(
            nn.Linear(input_dim, emb_dim),
            nn.LayerNorm(emb_dim),  # layer norm to stabilize training
            nn.LeakyReLU(0.01),     # same activation as the main network
        )
        # Kaiming initialization, consistent with the main network.
        linear = self.model[0]
        nn.init.kaiming_normal_(linear.weight, mode='fan_in',
                                nonlinearity='leaky_relu', a=0.01)
        nn.init.constant_(linear.bias, 0)

    def forward(self, x):
        flat = x.view(-1, self.input_dim)
        return self.model(flat)


class TimeSeriesEmbed(nn.Module):
    """Transformer-based sequence embedder with learned positional encoding.

    Maps (batch, sequence_length, input_dim) inputs to a pooled
    (batch, embed_dim) representation.
    """

    def __init__(self, sequence_length, input_dim, embed_dim, num_heads, num_layers):
        super().__init__()
        self.embedding = nn.Linear(input_dim, embed_dim)
        # learned positional encoding, broadcast over the batch dimension
        self.positional_encoding = nn.Parameter(
            torch.zeros(1, sequence_length, embed_dim))
        encoder_layer = nn.TransformerEncoderLayer(
            d_model=embed_dim, nhead=num_heads)
        self.transformer_encoder = nn.TransformerEncoder(
            encoder_layer, num_layers=num_layers)
        self.pooling = nn.AdaptiveAvgPool1d(1)

    def forward(self, x):
        # x: (batch, sequence_length, input_dim)
        tokens = self.embedding(x) + self.positional_encoding
        # encoder expects (sequence_length, batch, embed_dim)
        encoded = self.transformer_encoder(tokens.permute(1, 0, 2))
        # pool over the sequence axis: (batch, embed_dim, seq_len) -> (batch, embed_dim, 1)
        pooled = self.pooling(encoded.permute(1, 2, 0))
        return pooled.squeeze(-1)  # (batch, embed_dim)

class TimePressureEmbedder(nn.Module):
    """
    Pure-CNN embedder for (time, pressure) sequences.

    Input:  (batch_size, seq_len, 2) where the last axis is (time, pressure)
    Output: (batch_size, embed_dim), layer-normalized.

    Dimension flow:
        [batch, 2, L] -> conv(2,32)   -> pool -> [batch, 32,  L//2]
                      -> conv(32,64)  -> pool -> [batch, 64,  L//4]
                      -> conv(64,128) -> pool -> [batch, 128, L//8]
                      -> conv(128,E)          -> [batch, E,   L//8]
                      -> global avg pool      -> [batch, E]
    """

    def __init__(self, embed_dim=64):
        super().__init__()

        def conv_block(c_in, c_out):
            # length-preserving Conv1d (kernel 3, padding 1) + BN + ReLU
            return [
                nn.Conv1d(in_channels=c_in, out_channels=c_out,
                          kernel_size=3, padding=1),
                nn.BatchNorm1d(c_out),
                nn.ReLU(inplace=True),
            ]

        self.cnn = nn.Sequential(
            # stage 1: expand the channel dimension, halve the length
            *conv_block(2, 32), nn.MaxPool1d(2),
            # stage 2: deeper features, halve the length again
            *conv_block(32, 64), nn.MaxPool1d(2),
            # stage 3: high-level temporal patterns
            *conv_block(64, 128), nn.MaxPool1d(2),
            # stage 4: project into the embedding space
            *conv_block(128, embed_dim),
            # collapse any remaining length to 1
            nn.AdaptiveAvgPool1d(1),
        )

        # normalization of the embedding space
        self.layer_norm = nn.LayerNorm(embed_dim)

    def forward(self, x):
        """
        Forward pass.

        Reshapes (batch, seq_len, 2) to the (batch, channels, seq_len)
        layout Conv1d expects, extracts features, then squeezes and
        normalizes the embedding.
        """
        feats = self.cnn(x.permute(0, 2, 1))  # [batch, embed_dim, 1]
        return self.layer_norm(feats.squeeze(-1))  # [batch, embed_dim]

def ddpm_schedules(beta1, beta2, T):
    """
    Return pre-computed schedules for DDPM training and sampling.

    Args:
        beta1: smallest noise variance (at t near 0); must be in (0, 1)
        beta2: largest noise variance (at t = T); must satisfy beta1 < beta2 < 1
        T: number of diffusion steps

    Returns:
        dict of 1-D float tensors of length T + 1 (index t in [0, T]).
    """
    # Enforce the full contract stated in the message: the original check
    # omitted the lower bound, so beta1 <= 0 slipped through and produced
    # invalid schedules downstream.
    assert 0 < beta1 < beta2 < 1.0, "beta1 and beta2 must be in (0, 1)"

    # linear beta schedule: beta_t[t] = beta1 + (beta2 - beta1) * t / T
    beta_t = (beta2 - beta1) * torch.arange(0, T + 1, dtype=torch.float32) / T + beta1
    sqrt_beta_t = torch.sqrt(beta_t)
    alpha_t = 1 - beta_t
    log_alpha_t = torch.log(alpha_t)
    # cumulative product of alphas, computed in log space for stability
    alphabar_t = torch.cumsum(log_alpha_t, dim=0).exp()

    sqrtab = torch.sqrt(alphabar_t)
    oneover_sqrta = 1 / torch.sqrt(alpha_t)

    sqrtmab = torch.sqrt(1 - alphabar_t)
    mab_over_sqrtmab_inv = (1 - alpha_t) / sqrtmab

    return {
        "alpha_t": alpha_t,  # \alpha_t
        "oneover_sqrta": oneover_sqrta,  # 1/\sqrt{\alpha_t}
        "sqrt_beta_t": sqrt_beta_t,  # \sqrt{\beta_t}
        "alphabar_t": alphabar_t,  # \bar{\alpha_t}
        "sqrtab": sqrtab,  # \sqrt{\bar{\alpha_t}}
        "sqrtmab": sqrtmab,  # \sqrt{1-\bar{\alpha_t}}
        # (1-\alpha_t)/\sqrt{1-\bar{\alpha_t}}
        "mab_over_sqrtmab": mab_over_sqrtmab_inv,
    }


class DDPM(nn.Module):
    """
    Denoising Diffusion Probabilistic Model wrapper.

    Wraps a noise-prediction network ``nn_model`` with the DDPM training
    loss and classifier-free-guidance samplers.
    """

    def __init__(self, nn_model, betas, n_T, device, std=0.1, drop_prob=0.1):
        """
        Args:
            nn_model: network predicting noise from
                (x_t, t, b_condition, a_condition, context_mask)
            betas: (beta1, beta2) endpoints of the linear beta schedule
            n_T: number of diffusion steps
            device: torch device the model (and sampling) runs on
            std: normalization scale applied to x before diffusion
            drop_prob: probability of dropping the conditions during
                training (classifier-free guidance)
        """
        super(DDPM, self).__init__()
        self.nn_model = nn_model.to(device)
        num_params = sum(p.numel() for p in nn_model.parameters())
        print(f"Parameter number: {num_params*1e-6}M")
        # register_buffer exposes each schedule tensor as an attribute
        # (e.g. self.sqrtab) and moves it with the module across devices
        for k, v in ddpm_schedules(betas[0], betas[1], n_T).items():
            self.register_buffer(k, v)

        self.n_T = n_T
        self.device = device
        self.drop_prob = drop_prob
        # stored as a frozen parameter so it is saved in checkpoints
        std = torch.tensor(std)
        self.std = nn.Parameter(std, requires_grad=False)
        self.loss_mse = nn.MSELoss()

    def forward(self, x, b_condition, a_condition):
        """
        Training loss: samples the timestep and the noise at random and
        returns the (weighted) MSE between true and predicted noise.
        """
        x = x / self.std
        _ts = torch.randint(
            1, self.n_T+1, (x.shape[0],), device=self.device)  # t ~ Uniform({1..n_T})
        noise = torch.randn_like(x)  # eps ~ N(0, 1)

        # x_t = sqrt(alphabar) * x_0 + sqrt(1 - alphabar) * eps
        x_t = (
            self.sqrtab[_ts, None] * x
            + self.sqrtmab[_ts, None] * noise
        )
        # The network predicts the noise term from x_t.

        # drop the context with probability drop_prob (classifier-free guidance)
        context_mask = torch.bernoulli(
            torch.zeros_like(a_condition[:, 0:1])+self.drop_prob).to(self.device)

        pre_noise = self.nn_model(
            x_t, _ts / self.n_T, b_condition, a_condition, context_mask)
        # Weighted MSE: the last 5 components (gv + av1) are up-weighted 10x
        # relative to the rest (lv).
        return self.loss_mse(noise[:, :-5], pre_noise[:, :-5]) + 10 * self.loss_mse(noise[:, -5:], pre_noise[:, -5:])

    def sample_single_cond(self, n_sample, x_size, guide_w, b_condition, a_condition):
        """Draw n_sample conditional samples for a single condition pair."""
        b_condition = b_condition.unsqueeze(0).repeat(n_sample, 1, 1)
        a_condition = a_condition.unsqueeze(0).repeat(n_sample, 1)
        return self.sample_multiple_cond(x_size, guide_w, b_condition, a_condition)

    def sample_multiple_cond(self, x_size, guide_w, b_condition, a_condition):
        """
        Guided sampling, one sample per condition row.

        Follows the scheme of 'Classifier-Free Diffusion Guidance': the
        batch is doubled — first half with context_mask=0 (conditional),
        second half with context_mask=1 (unconditional) — and the two
        noise predictions are mixed with guidance scale guide_w, where
        w > 0 means more guidance.
        """
        # x_T ~ N(0, 1): initial noise.
        # BUGFIX: use self.device (the device this model lives on) instead
        # of the module-global `device`, so sampling still works when the
        # model has been moved elsewhere.
        n_sample = a_condition.shape[0]
        x_i = torch.randn(n_sample, x_size).to(self.device)
        b_condition = b_condition.to(self.device)
        a_condition = a_condition.to(self.device)

        # don't drop context at test time
        context_mask = torch.zeros_like(a_condition[:, 0:1]).to(self.device)

        # double the batch
        b_condition = b_condition.repeat(2, 1, 1)
        a_condition = a_condition.repeat(2, 1)
        context_mask = context_mask.repeat(2, 1)
        context_mask[n_sample:] = 1.  # second half of the batch is context-free

        list_of_time_steps = list(range(self.n_T, 0, -1))
        tqdm_denoise = tqdm(list_of_time_steps, desc='sampling  ',
                            colour='MAGENTA', dynamic_ncols=True)

        # pre-generate all denoising noise
        noise_z = torch.randn(self.n_T, n_sample, x_size, device=self.device)
        for i in tqdm_denoise:
            t_is = torch.full((n_sample, 1), i / self.n_T, device=self.device)

            # double batch
            x_i = x_i.repeat(2, 1)
            t_is = t_is.repeat(2, 1)

            z = noise_z[i-1] if i > 1 else 0  # no noise at the final step

            # split predictions and mix them with the guidance weight
            eps = self.nn_model(
                x_i, t_is[:, 0], b_condition, a_condition, context_mask)
            eps_cond = eps[:n_sample]
            eps_uncond = eps[n_sample:]
            eps = (1.0+guide_w)*eps_cond - guide_w*eps_uncond
            x_i = x_i[:n_sample]

            x_i = (
                self.oneover_sqrta[i] * (x_i - eps * self.mab_over_sqrtmab[i])
                + self.sqrt_beta_t[i] * z
            )
            # update the progress bar
            tqdm_denoise.set_postfix(T=f'{i}')
        # undo the training-time normalization
        return x_i*self.std

    # unconditional sampling, for testing only
    def sample_single_uncond(self, n_sample, x_size, b_condition, a_condition):
        """Draw n_sample unconditional samples for a single condition pair."""
        b_condition = b_condition.unsqueeze(0).repeat(n_sample, 1, 1)
        a_condition = a_condition.unsqueeze(0).repeat(n_sample, 1)
        return self.sample_multiple_uncond(x_size, b_condition, a_condition)

    def sample_multiple_uncond(self, x_size, b_condition, a_condition):
        """Unconditional sampling: the context mask is all ones, so the
        condition embeddings are zeroed inside the network."""
        # x_T ~ N(0, 1): initial noise (on the model's own device — see
        # sample_multiple_cond for the rationale)
        n_sample = a_condition.shape[0]
        x_i = torch.randn(n_sample, x_size).to(self.device)
        b_condition = b_condition.to(self.device)
        a_condition = a_condition.to(self.device)

        # mask of ones drops all context: unconditional generation
        context_mask = torch.ones_like(a_condition[:, 0:1]).to(self.device)

        list_of_time_steps = list(range(self.n_T, 0, -1))
        tqdm_denoise = tqdm(list_of_time_steps, desc='sampling  ',
                            colour='MAGENTA', dynamic_ncols=True)

        # pre-generate all denoising noise
        noise_z = torch.randn(self.n_T, n_sample, x_size, device=self.device)
        for i in tqdm_denoise:
            t_is = torch.full((n_sample, 1), i / self.n_T, device=self.device)

            z = noise_z[i-1] if i > 1 else 0  # no noise at the final step

            # conditions are still passed but masked out by context_mask
            eps = self.nn_model(
                x_i, t_is[:, 0], b_condition, a_condition, context_mask)

            x_i = (
                self.oneover_sqrta[i] * (x_i - eps * self.mab_over_sqrtmab[i])
                + self.sqrt_beta_t[i] * z
            )
            # update the progress bar
            tqdm_denoise.set_postfix(T=f'{i}')
        return x_i * self.std
