import math

import torch
from torch import nn


class WindIcingModelV1(nn.Module):
    """Baseline model: per-window 2D-CNN feature extraction followed by an
    LSTM over the sequence dimension.

    Expected input shape: (batch_size, seq_len, window_size, feature_dim);
    output shape: (batch_size, seq_len, 2).
    """

    def __init__(self,
                 seq_len, window_size, feature_dim,
                 dropout=0.1,
                 device=torch.device('cpu')
                 ):
        super().__init__()
        self.seq_len = seq_len
        self.window_size = window_size
        self.feature_dim = feature_dim
        self.device = device

        self.h_dim = 32

        self.dropout = nn.Dropout(dropout)

        # Pass-through: an earlier Linear/ReLU/Dropout stack was disabled,
        # so fc1 currently leaves the raw features untouched.
        self.fc1 = nn.Sequential(
            nn.Identity(),
        )

        # Treats every window as a single-channel "image" of shape
        # (window_size, feature_dim).
        self.conv_group = nn.Sequential(
            nn.Conv2d(in_channels=1, out_channels=8, kernel_size=5, stride=1, padding=2),
            nn.ReLU(),
            nn.Conv2d(in_channels=8, out_channels=16, kernel_size=5, stride=1, padding=0),
            nn.ReLU(),
            nn.Conv2d(in_channels=16, out_channels=32, kernel_size=5, stride=1, padding=0),
            nn.ReLU(),
            nn.MaxPool2d(kernel_size=2),
        )

        self.flatten = nn.Flatten()

        # Lazy layer: its input width depends on window_size/feature_dim and
        # is resolved automatically on the first forward pass.
        self.fc2 = nn.LazyLinear(out_features=1024)

        self.LSTM = nn.LSTM(input_size=1024, hidden_size=1024, batch_first=True, num_layers=4)

        self.fc_head = nn.Sequential(
            nn.Linear(in_features=1024, out_features=512),
            nn.ReLU(),
            nn.Linear(in_features=512, out_features=256),
            nn.ReLU(),
            nn.Linear(in_features=256, out_features=2),
        )

    def forward(self, inputs):
        return self.my_forward(inputs)

    def my_forward(self, inputs, debug=False):
        """Run the full pipeline; set debug=True to print intermediate shapes."""
        # inputs: (batch_size, seq_len, window_size, feature_dim)
        x = self.fc1(inputs)
        if debug:
            print('fc1_output', x.shape)

        # Collapse batch and sequence dims and add a channel axis so each
        # window becomes one single-channel image for the 2D convolutions:
        # (batch_size * seq_len, 1, window_size, feature_dim)
        x = self.dropout(self.conv_group(x.reshape(-1, 1, *x.shape[-2:])))
        if debug:
            print("conv_output", x.shape)

        x = self.flatten(x)
        if debug:
            print("flatten_output", x.shape)

        x = self.dropout(nn.functional.relu(self.fc2(x)))
        if debug:
            print("fc2_output", x.shape)

        # Restore the sequence axis for the recurrent stage:
        # (batch_size, seq_len, 1024)
        x = x.view(-1, self.seq_len, x.shape[-1])
        if debug:
            print("lstm_input", x.shape)

        x, _ = self.LSTM(x)
        if debug:
            print("lstm_output", x.shape)

        out = self.fc_head(nn.functional.relu(x))
        if debug:
            print("output", out.shape)
        return out


class WindIcingModelV2(nn.Module):
    """Two-stage Transformer-encoder model.

    Stage 1 attends across the window dimension of every (batch, seq) slice;
    stage 2 attends across the sequence dimension after a learned reduction,
    with an all-zero CLS token prepended.

    Expected input: (batch_size, seq_len, window_size, feature_dim);
    output: (batch_size, seq_len + 1, 2), where position 0 corresponds to
    the CLS token.
    """

    def __init__(self,
                 seq_len, window_size, feature_dim, h_dim,
                 d_model1, n_head1, dim_feedforward1, num_layers1,
                 d_model2, n_head2, dim_feedforward2, num_layers2,
                 dropout=0.1, device=torch.device('cpu')):
        super().__init__()
        self.seq_len = seq_len
        self.window_size = window_size
        self.feature_dim = feature_dim
        self.h_dim = h_dim
        self.d_model1 = d_model1
        self.n_head1 = n_head1
        self.dim_feedforward1 = dim_feedforward1
        self.num_layers1 = num_layers1
        self.d_model2 = d_model2
        self.n_head2 = n_head2
        self.dim_feedforward2 = dim_feedforward2
        self.num_layers2 = num_layers2

        self.dropout = nn.Dropout(dropout)
        self.device = device

        # Positional encodings for both stages; max_len covers either axis
        # plus slack for the prepended CLS token.
        self.pe1 = PositionalEncoding(d_model1, dropout=dropout, max_len=max(self.window_size, self.seq_len)+5)
        self.pe2 = PositionalEncoding(d_model2, dropout=dropout, max_len=max(self.window_size, self.seq_len)+5)

        # Lifts raw features to the stage-1 model width:
        # (batch_size, seq_len, window_size, feature_dim) ->
        # (batch_size, seq_len, window_size, d_model1)
        self.fc1 = nn.Sequential(
            nn.Linear(in_features=self.feature_dim, out_features=self.h_dim),
            nn.ReLU(),
            nn.Linear(in_features=self.h_dim, out_features=self.d_model1),
            nn.ReLU(),
            nn.Dropout(dropout)
        )

        # Stage 1: self-attention across the window dimension on tensors of
        # shape (batch_size * seq_len, window_size, d_model1).
        self.transformer_encoder1 = nn.TransformerEncoder(
            encoder_layer=nn.TransformerEncoderLayer(
                d_model=self.d_model1,
                nhead=self.n_head1,
                dim_feedforward=self.dim_feedforward1,
                dropout=dropout,
                batch_first=True
            ),
            num_layers=self.num_layers1
        )

        # Flattens each window's tokens and projects them down to d_model2:
        # (batch_size * seq_len, d_model2)
        self.reduction = nn.Sequential(
            nn.Flatten(),
            nn.Linear(in_features=self.window_size * self.d_model1, out_features=self.d_model2),
            nn.ReLU(),
            nn.Dropout(dropout)
        )

        # Stage 2: self-attention across the sequence dimension on tensors of
        # shape (batch_size, seq_len + 1, d_model2); position 0 is the CLS
        # token.
        self.transformer_encoder2 = nn.TransformerEncoder(
            encoder_layer=nn.TransformerEncoderLayer(
                d_model=self.d_model2,
                nhead=self.n_head2,
                dim_feedforward=self.dim_feedforward2,
                dropout=dropout,
                batch_first=True
            ),
            num_layers=self.num_layers2
        )

        self.fc_out = nn.Linear(in_features=self.d_model2, out_features=2)

    def forward(self, inputs):
        return self.my_forward(inputs)

    def my_forward(self, inputs, debug=False):
        """Run the full pipeline; set debug=True to print intermediate shapes."""
        device = inputs.device
        batch_size = inputs.shape[0]

        if debug:
            # (batch_size, seq_len, window_size, feature_dim)
            print("inputs", inputs.shape)

        projected = self.fc1(inputs)
        if debug:
            # (batch_size, seq_len, window_size, d_model1)
            print("linear1_output", projected.shape)

        # Fold batch and sequence together so attention runs per window.
        windows = self.pe1(projected.view(-1, self.window_size, self.d_model1))
        if debug:
            # (batch_size * seq_len, window_size, d_model)
            print("tfe1_input", windows.shape)

        encoded_windows = self.transformer_encoder1(windows)
        if debug:
            # (batch_size * seq_len, window_size, d_model)
            print("tfe1_output", encoded_windows.shape)

        # (batch_size * seq_len, d_model2) -> (batch_size, seq_len, d_model2)
        seq_tokens = self.reduction(encoded_windows).view(-1, self.seq_len, self.d_model2)
        # Prepend an all-zero CLS token: (batch_size, seq_len + 1, d_model2).
        cls_token = torch.zeros(batch_size, 1, self.d_model2, device=device)
        seq_input = self.pe2(torch.cat((cls_token, seq_tokens), dim=1))
        if debug:
            print("tfe2_input", seq_input.shape)

        encoded_seq = self.transformer_encoder2(seq_input)
        if debug:
            # (batch_size, seq_len + 1, d_model2)
            print('tfe2_output', encoded_seq.shape)

        # (batch_size, seq_len + 1, 2)
        output = self.fc_out(encoded_seq)
        if debug:
            print("output", output.shape)
        return output

class PositionalEncoding(nn.Module):
    """Classic sinusoidal positional encoding ("Attention Is All You Need").

    Adds a fixed sin/cos position signal to the input embeddings, followed by
    dropout. Works for both even and odd ``d_model``.

    Args:
        d_model: embedding width of the inputs this module will receive.
        dropout: dropout probability applied after adding the encoding.
        max_len: maximum sequence length that can be encoded.
    """

    def __init__(self, d_model, dropout=0.1, max_len=5000):
        super().__init__()
        self.dropout = nn.Dropout(p=dropout)

        # pe has shape (max_len, d_model): one encoding row per position.
        pe = torch.zeros(max_len, d_model)
        position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
        # Frequencies decay geometrically from 1 down to ~1/10000.
        div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))
        pe[:, 0::2] = torch.sin(position * div_term)
        # BUGFIX: for odd d_model the cosine slice has one column fewer than
        # div_term, so the assignment crashed; truncating div_term fixes that
        # and is a no-op for even d_model.
        pe[:, 1::2] = torch.cos(position * div_term[: d_model // 2])
        # Buffer (not a Parameter): moves with .to(device) but is not trained.
        self.register_buffer('pe', pe)

    def forward(self, x):
        """Add positional encoding to ``x`` of shape (batch, seq_len, d_model).

        The first seq_len rows of pe are broadcast across the batch dimension.
        """
        x = x + self.pe[:x.size(1), :].unsqueeze(0)
        return self.dropout(x)
class WindIcingModelV3(nn.Module):
    """Two-stage Transformer with an auxiliary window-power prediction head.

    Pipeline (input: (batch_size, seq_len, window_size, feature_dim)):
      1. ``fc1`` lifts the raw features to ``d_model1``.
      2. Transformer encoder 1 attends over each window; an all-zero CLS token
         is prepended and later read out to predict the window's mean power.
      3. The per-window tokens are flattened and reduced to ``d_model2``.
      4. The (predicted, measured) power pair is embedded to ``d_model2`` and
         added to the sequence tokens.
      5. Transformer encoder 2 attends over the sequence; its CLS output is
         mapped to a 2-class icing prediction.

    Returns ``(power_preds, icing_preds)`` with shapes
    (batch_size, seq_len) and (batch_size, 2).
    """

    def __init__(self, seq_len, window_size, feature_dim, h_dim,
                 d_model1, n_head1, dim_feedforward1, num_layers1,
                 d_model2, n_head2, dim_feedforward2, num_layers2,
                 dropout=0.1, device=torch.device('cpu')):
        super().__init__()
        self.seq_len = seq_len
        self.window_size = window_size
        self.feature_dim = feature_dim
        self.h_dim = h_dim
        self.d_model1 = d_model1
        self.n_head1 = n_head1
        self.dim_feedforward1 = dim_feedforward1
        self.num_layers1 = num_layers1
        self.d_model2 = d_model2
        self.n_head2 = n_head2
        self.dim_feedforward2 = dim_feedforward2
        self.num_layers2 = num_layers2
        self.device = device
        # Input is (batch_size, seq_len, window_size, feature_dim); lift the
        # feature dimension to the stage-1 model width:
        # (batch_size, seq_len, window_size, h_dim -> d_model1)
        self.fc1 = nn.Sequential(
            nn.Linear(in_features=self.feature_dim, out_features=self.h_dim),
            nn.ReLU(),
            nn.Dropout(dropout),
            nn.Linear(in_features=self.h_dim, out_features=self.d_model1),
            nn.ReLU(),
            nn.Dropout(dropout)
        )
        
        
        # Positional encodings for both stages; max_len covers either axis
        # plus slack for the prepended CLS tokens.
        self.pe1 = PositionalEncoding(d_model1, dropout=dropout, max_len=max(self.window_size, self.seq_len)+5)
        self.pe2 = PositionalEncoding(d_model2, dropout=dropout, max_len=max(self.window_size, self.seq_len)+5)
        
        # Stage 1: self-attention across the window dimension on tensors of
        # shape (batch_size * seq_len, window_size + 1, d_model1); the extra
        # leading position is a CLS token for window-power prediction.
        self.te1 = nn.TransformerEncoder(
            encoder_layer=nn.TransformerEncoderLayer(
                d_model=self.d_model1,
                nhead=self.n_head1,
                dim_feedforward=self.dim_feedforward1,
                dropout=dropout,
                batch_first=True
            ),
            num_layers=self.num_layers1
        )
        
        # Reads the stage-1 CLS token to predict each window's mean power.
        self.power_mlp = nn.Sequential(
            nn.Linear(in_features=self.d_model1, out_features=self.h_dim),
            nn.ReLU(),
            nn.Dropout(dropout),
            nn.Linear(in_features=self.h_dim, out_features=self.h_dim // 2),
            nn.ReLU(),
            nn.Dropout(dropout),
            nn.Linear(in_features=self.h_dim // 2, out_features=1)
        )
        
        # Embeds the (predicted power, measured power) pair so it can be added
        # to the stage-2 sequence tokens.
        # BUGFIX: the output width must be d_model2 (the embedding is summed
        # with the reduction output in my_forward); the original projected to
        # d_model1, which crashed whenever d_model1 != d_model2.
        self.power_fc = nn.Sequential(
            nn.Linear(in_features=2, out_features=self.d_model2 // 2),
            nn.ReLU(),
            nn.Linear(in_features=self.d_model2 // 2, out_features=self.d_model2),
            nn.ReLU(),
            nn.Dropout(dropout)
        )
        
        # Flattens each window's tokens and projects them down to d_model2:
        # (batch_size * seq_len, d_model2)
        self.reduction = nn.Sequential(
            nn.Flatten(),
            nn.Linear(in_features=self.window_size * self.d_model1, out_features=self.d_model2),
            nn.ReLU(),
            nn.Dropout(dropout)
        )
        
        # Stage 2: self-attention across the sequence dimension on tensors of
        # shape (batch_size, seq_len + 1, d_model2); position 0 is a CLS token
        # for icing-label prediction.
        self.te2 = nn.TransformerEncoder(
            encoder_layer=nn.TransformerEncoderLayer(
                d_model=self.d_model2,
                nhead=self.n_head2,
                dim_feedforward=self.dim_feedforward2,
                dropout=dropout,
                batch_first=True
            ),
            num_layers=self.num_layers2
        )
        
        # Maps the stage-2 CLS token to the 2-class icing prediction.
        self.icing_mlp = nn.Sequential(
            nn.Linear(in_features=self.d_model2, out_features=self.d_model2 // 2),
            nn.ReLU(),
            nn.Dropout(dropout),
            nn.Linear(in_features=self.d_model2 // 2, out_features=2)
        )

    def forward(self, input, power):
        return self.my_forward(input, power)

    def my_forward(self, input, power, debug=False):
        """Full forward pass; set debug=True to print intermediate shapes.

        input: (batch_size, seq_len, window_size, feature_dim)
        power: (batch_size, seq_len) measured mean power per window
        """
        batch_size = input.size(0)
        device = input.device
        if debug:
            print("input.shape: ", input.shape)
            print("power.shape: ", power.shape)
        
        te1_output = self.tfe1_compute(input, batch_size, device, debug)
        
        # Read the CLS position of the stage-1 encoder output.
        # power_mlp_input.shape: (batch_size*seq_len, d_model1)
        power_mlp_input = te1_output[:, 0, :]
        if debug:
            print("power_mlp_input.shape: ", power_mlp_input.shape)
        
        power_preds = self.power_pred_compute(power_mlp_input, batch_size, debug)
        
        # TODO one option is to keep the CLS token and fuse it into the next
        # stage as a power feature; another is to append the predicted value to
        # the features. The scheme here concatenates predicted and measured
        # power and fuses them with the original features.
        # Drop the leading CLS token.
        # reduction_input.shape: (batch_size*seq_len, window_size, d_model1)
        reduction_input = te1_output[:, 1:, :]
        if debug:
            print("reduction_input.shape: ", reduction_input.shape)
        
        # Reshape/reduce to the stage-2 model width.
        reduction_output = self.tfe2_pre_compute(reduction_input, batch_size, debug)
        
        # Embed predicted + measured power into the stage-2 width.
        power_fc_output = self.power_pred_transform(power_preds, power, debug)
        
        # Prepend the stage-2 CLS token.
        cls_token2 = torch.zeros(batch_size, 1, self.d_model2, device=device)
        # TODO power is fused by plain addition of hidden vectors — is there a
        # better fusion scheme?
        # te2_input.shape: (batch_size, seq_len+1, d_model2)
        te2_input = self.pe2(torch.cat([cls_token2, reduction_output + power_fc_output], dim=1))
        if debug:
            print("te2_input.shape: ", te2_input.shape)
        
        # Stage-2 Transformer and icing-label prediction.
        icing_preds = self.tfe2_compute_label_pred(te2_input, debug)
        
        return power_preds, icing_preds
    
    def tfe2_compute_label_pred(self, te2_input, debug):
        """Run the stage-2 encoder and map its CLS token to icing logits."""
        # te2_output.shape: (batch_size, seq_len+1, d_model2)
        te2_output = self.te2(te2_input)
        if debug:
            print("te2_output.shape: ", te2_output.shape)
        # Read the CLS position.
        # icing_mlp_input.shape: (batch_size, d_model)
        icing_mlp_input = te2_output[:, 0, :]
        if debug:
            print("icing_mlp_input.shape: ", icing_mlp_input.shape)
        # icing_mlp_output.shape: (batch_size, 2)
        icing_mlp_output = self.icing_mlp(icing_mlp_input)
        if debug:
            print("icing_mlp_output.shape: ", icing_mlp_output.shape)
        icing_preds = icing_mlp_output
        return icing_preds
    
    def power_pred_transform(self, power_preds, power, debug):
        """Embed (predicted, measured) power pairs into the stage-2 width."""
        # power_input.shape: (batch_size, seq_len, 2)
        power_input = torch.stack((power_preds, power), dim=2)
        if debug:
            print(power_preds.dtype, power.dtype)
            print("power_input.shape: ", power_input.shape)
        # power_fc_output.shape: (batch_size, seq_len, d_model2)
        power_fc_output = self.power_fc(power_input)
        if debug:
            print("power_fc_output.shape: ", power_fc_output.shape)
        return power_fc_output
    
    def tfe2_pre_compute(self, reduction_input, batch_size, debug):
        """Flatten window tokens, reduce to d_model2, restore the batch axis."""
        # reduction_output.shape: (batch_size*seq_len, d_model2)
        reduction_output = self.reduction(reduction_input)
        if debug:
            print("reduction_output.shape: ", reduction_output.shape)
        # Reshape for sequence-level attention in the stage-2 encoder.
        # reduction_output_view.shape: (batch_size, seq_len, d_model2)
        reduction_output_view = reduction_output.view(batch_size, self.seq_len, self.d_model2)
        if debug:
            print("reduction_output_view.shape: ", reduction_output_view.shape)
        return reduction_output_view
    
    def power_pred_compute(self, power_mlp_input, batch_size, debug):
        """Predict each window's mean power from its stage-1 CLS embedding."""
        # power_mlp_output.shape: (batch_size*seq_len, 1)
        power_mlp_output = self.power_mlp(power_mlp_input)
        if debug:
            print("power_mlp_output.shape: ", power_mlp_output.shape)
        # Predicted mean power per window.
        # power_preds.shape: (batch_size, seq_len)
        power_preds = power_mlp_output.view(batch_size, self.seq_len)
        if debug:
            print("power_preds.shape: ", power_preds.shape)
        return power_preds
    
    def tfe1_compute(self, input, batch_size, device, debug):
        """Project features, prepend a CLS token, run the stage-1 encoder."""
        # Lift features to the Transformer hidden width.
        # fc1_output.shape: (batch_size, seq_len, window_size, d_model1)
        fc1_output = self.fc1(input)
        if debug:
            print("fc1_output.shape: ", fc1_output.shape)
        # Fold batch and sequence so attention runs per window.
        # fc1_output_view.shape: (batch_size*seq_len, window_size, d_model1)
        fc1_output_view = fc1_output.view(batch_size * self.seq_len, self.window_size, self.d_model1)
        if debug:
            print("fc1_output_view.shape: ", fc1_output_view.shape)
        # Prepend an all-zero CLS token used to predict the window's mean power.
        cls_token1 = torch.zeros(batch_size * self.seq_len, 1, self.d_model1, device=device)
        # Positional encoding.
        # te1_input.shape: (batch_size*seq_len, window_size+1, d_model1)
        te1_input = self.pe1(torch.cat([cls_token1, fc1_output_view], dim=1))
        if debug:
            print("te1_input.shape: ", te1_input.shape)
        # Stage-1 Transformer.
        # te1_output.shape: (batch_size * seq_len, window_size + 1, d_model1)
        te1_output = self.te1(te1_input)
        if debug:
            print("te1_output.shape: ", te1_output.shape)
        return te1_output


class WindIcingModelV4(WindIcingModelV3):
    """V3 plus fusion of precomputed power-derived features.

    ``my_forward`` receives ``power_feature`` as a tuple
    ``(power, power_feature)`` where
    power: (batch_size, seq_len) measured mean power per window and
    power_feature: (batch_size, seq_len, window_size, power_feature_dim).
    Returns the same ``(power_preds, icing_preds)`` pair as V3.
    """

    def __init__(self, seq_len, window_size, feature_dim, h_dim, d_model1, n_head1, dim_feedforward1, num_layers1,
                 d_model2, n_head2, dim_feedforward2, num_layers2,
                 dropout=0.1, device=torch.device('cpu'),
                 **kwargs):
        super().__init__(seq_len, window_size, feature_dim, h_dim, d_model1, n_head1, dim_feedforward1, num_layers1,
                         d_model2, n_head2, dim_feedforward2, num_layers2, dropout, device)
        
        # Embeds the flattened power-derived features into the stage-2 width.
        # Lazy layers: the input width depends on how many feature columns are
        # selected in my_forward.
        self.power_feature_fc = nn.Sequential(
            nn.LazyLinear(out_features=self.d_model2 // 2),
            nn.ReLU(),
            nn.LazyLinear(out_features=self.d_model2),
            nn.ReLU(),
            # BUGFIX: honor the configured dropout rate; the original used
            # nn.Dropout() which silently applied the 0.5 default, unlike
            # every other dropout in this model.
            nn.Dropout(dropout),
        )
    
    def forward(self, input, power_feature):
        return self.my_forward(input, power_feature)
    
    def my_forward(self, input, power_feature, debug=False):
        """Full forward pass; set debug=True to print intermediate shapes."""
        # power_feature arrives as a (power, power_feature) tuple.
        power, power_feature = power_feature
        # input.shape: (batch_size, seq_len, window_size, feature_dim)
        # power.shape: (batch_size, seq_len)
        # power_feature.shape: (batch_size, seq_len, window_size, power_feature_dim)
        batch_size = input.size(0)
        device = input.device
        if debug:
            print("input.shape: ", input.shape)
            print("power.shape: ", power.shape)
            print("power_feature.shape: ", power_feature.shape)
        
        te1_output = self.tfe1_compute(input, batch_size, device, debug)
        
        # Read the CLS position of the stage-1 encoder output.
        # power_mlp_input.shape: (batch_size*seq_len, d_model1)
        power_mlp_input = te1_output[:, 0, :]
        if debug:
            print("power_mlp_input.shape: ", power_mlp_input.shape)
        
        power_preds = self.power_pred_compute(power_mlp_input, batch_size, debug)
        
        # TODO one option is to keep the CLS token and fuse it into the next
        # stage as a power feature; another is to append the predicted value to
        # the features. The scheme here concatenates predicted and measured
        # power and fuses them with the original features.
        # Drop the leading CLS token.
        # reduction_input.shape: (batch_size*seq_len, window_size, d_model1)
        reduction_input = te1_output[:, 1:, :]
        if debug:
            print("reduction_input.shape: ", reduction_input.shape)
        
        # Reshape/reduce to the stage-2 model width.
        reduction_output = self.tfe2_pre_compute(reduction_input, batch_size, debug)
        
        # Embed predicted + measured power into the stage-2 width.
        power_fc_output = self.power_pred_transform(power_preds, power, debug)
        
        # no_power_list = ['power', 'r_square', 'r_wind_speed_to_power', 'torque', 'cp', 'ct']
        # How to fold the power-derived synthetic features back in: columns
        # 1 (r_square) and 2 (r_wind_speed_to_power) are selected.
        power_feature = power_feature[:,:,:,1:3]
        power_feature_view = power_feature.reshape(batch_size, self.seq_len, -1)
        power_feature_fc_output = self.power_feature_fc(power_feature_view)
        if debug:
            print("power_feature_fc_output.shape: ", power_feature_fc_output.shape)
        
        # Prepend the stage-2 CLS token.
        cls_token2 = torch.zeros(batch_size, 1, self.d_model2, device=device)
        # TODO power is fused by plain addition of hidden vectors — is there a
        # better fusion scheme?
        # te2_input.shape: (batch_size, seq_len+1, d_model2)
        te2_input = self.pe2(torch.cat([cls_token2, reduction_output + power_fc_output + power_feature_fc_output], dim=1))
        if debug:
            print("te2_input.shape: ", te2_input.shape)
        
        # Stage-2 Transformer and icing-label prediction.
        icing_preds = self.tfe2_compute_label_pred(te2_input, debug)
        
        return power_preds, icing_preds
        
if __name__ == '__main__':
    # Smoke test: build the V4 model and run one forward pass on zero tensors,
    # once with shape debugging and once through the standard forward().
    model = WindIcingModelV4(
        seq_len=10, window_size=64, feature_dim=26,
        h_dim=128,
        d_model1=256, n_head1=4, dim_feedforward1=256, num_layers1=4,
        d_model2=256, n_head2=4, dim_feedforward2=256, num_layers2=2,
        dropout=0.1, device=torch.device('cpu')
    )

    sensor_batch = torch.zeros((5, 10, 64, 26))
    mean_power = torch.zeros((5, 10))
    power_feats = torch.zeros((5, 10, 64, 6))
    model.my_forward(sensor_batch, (mean_power, power_feats), debug=True)
    model(sensor_batch, (mean_power, power_feats))