import torch
import torch.nn as nn
import torch.nn.functional as F

def weights_init(m):
    """DCGAN-style initializer, intended for use with ``model.apply``.

    Conv layers get N(0, 0.02) weights; BatchNorm layers get N(1, 0.02)
    weights and zero bias. Any other module type is left untouched.
    """
    layer_type = type(m).__name__
    if 'Conv' in layer_type:
        m.weight.data.normal_(0.0, 0.02)
    elif 'BatchNorm' in layer_type:
        m.weight.data.normal_(1.0, 0.02)
        m.bias.data.fill_(0)

class FlowEncoder(nn.Module):
    """Predicts a soft flow mask from the raw 4-channel input.

    A tiny two-layer head: a 3x3 conv expanding 4 -> 16 channels, then a
    1x1 conv squashed through a sigmoid. Spatial resolution is preserved.
    """

    def __init__(self):
        super().__init__()
        self.conv = nn.Sequential(
            nn.Conv2d(4, 16, 3, padding=1),  # 4 input channels (incl. inlet mask channel)
            nn.ReLU(),
            nn.Conv2d(16, 1, 1),
            nn.Sigmoid(),
        )

    def forward(self, x):
        """Return a [B, 1, H, W] mask with values in (0, 1)."""
        mask = self.conv(x)
        return mask

class CBAM(nn.Module):
    """Convolutional Block Attention Module: channel gate then spatial gate.

    The channel gate squeezes with global average pooling and a two-layer
    bottleneck (reduction ratio 8); the spatial gate convolves the
    concatenated channel-wise max/mean maps with a 3x3 kernel. Both gates
    emit sigmoid weights, so the output is an element-wise rescaling of
    the input and keeps its shape.
    """

    def __init__(self, gate_ch):
        super().__init__()
        self.ch_gate = nn.Sequential(
            nn.AdaptiveAvgPool2d(1),
            nn.Conv2d(gate_ch, gate_ch//8, 1),  # bottleneck: reduce channels by 8x
            nn.ReLU(),
            nn.Conv2d(gate_ch//8, gate_ch, 1),
            nn.Sigmoid(),
        )
        self.sp_gate = nn.Sequential(
            nn.Conv2d(2, 1, 3, padding=1),  # 3x3 kernel over [max, mean] maps
            nn.Sigmoid(),
        )

    def forward(self, x):
        # Channel attention first, then spatial attention on its result.
        attended = x * self.ch_gate(x)
        pooled_max = torch.max(attended, dim=1, keepdim=True)[0]
        pooled_avg = torch.mean(attended, dim=1, keepdim=True)
        spatial = self.sp_gate(torch.cat((pooled_max, pooled_avg), dim=1))
        return attended * spatial


class PhysicsConstraint(nn.Module):
    """Soft physics-based correction for a predicted velocity field.

    Nudges the velocity components along the finite-difference pressure
    gradient with a learnable step size ``alpha`` and, when a mask is
    given, damps the field with a divergence-based correction restricted
    to the masked region. All differences use interior slices
    ([..., 3:-3] vs [..., 1:-5]) so they stay inside the domain.

    Fixes vs. the previous revision:
    - removed the debug ``print`` calls, one of which dereferenced
      ``mask.shape`` and crashed whenever ``mask`` was None (the default);
    - the input ``v`` is no longer mutated in place (the caller's tensor
      and autograd version counters are left alone);
    - the mean-divergence term is kept as [B, 1, 1, 1] instead of
      [B, 1, 1], whose product with the [B, 1, H, W'] divergence map
      mis-broadcast to [B, B, H, W'] and raised for batch sizes > 1.
    """

    def __init__(self):
        super().__init__()
        # Learnable strength of the pressure-gradient correction.
        self.alpha = nn.Parameter(torch.tensor(0.1))

    def forward(self, p, v, mask=None):
        """
        Args:
            p: pressure field, [B, 1, Hp, Wp]; resampled to v's size if needed.
            v: velocity field, [B, 2, H, W] (channel 0 = x, channel 1 = y).
            mask: optional [B, 1, H, W] region mask for the divergence term.

        Returns:
            Corrected velocity field, [B, 2, H, W]; ``v`` itself is untouched.
        """
        assert p.dim() == 4 and v.dim() == 4, "输入必须为4D张量"
        B, _, H, W = v.shape
        if p.shape[-2:] != (H, W):
            p = F.interpolate(p, size=(H, W), mode='bilinear', align_corners=False)

        # Work on a copy: in-place edits of the caller's tensor would both
        # surprise callers and break autograd's in-place version tracking.
        v = v.clone()

        # Pressure differences on the interior of the domain.
        p_grad_x = p[:, :, :, 3:-3] - p[:, :, :, 1:-5]   # [B, 1, H, W-6]
        p_grad_y = p[:, :, 3:-3, :] - p[:, :, 1:-5, :]   # [B, 1, H-6, W]

        # Push velocities along the pressure gradient.
        v[:, 0:1, :, 3:-3] += self.alpha * p_grad_x
        v[:, 1:2, 3:-3, :] += self.alpha * p_grad_y

        # Divergence-based damping, restricted to the masked region.
        if mask is not None:
            div_x = v[:, 0, :, 3:-3] - v[:, 0, :, 1:-5]  # [B, H, W-6]
            div_y = v[:, 1, 3:-3, :] - v[:, 1, 1:-5, :]  # [B, H-6, W]
            div_x = div_x * mask[:, 0, :, 3:-3]
            div_y = div_y * mask[:, 0, 3:-3, :]

            # Restore the channel dimension before broadcasting.
            div_x = div_x.unsqueeze(1)                   # [B, 1, H, W-6]
            div_y = div_y.unsqueeze(1)                   # [B, 1, H-6, W]

            # Per-sample mean divergence as [B, 1, 1, 1] so the products
            # below broadcast correctly for any batch size.
            div_x_mean = torch.mean(div_x, dim=(2, 3), keepdim=True)
            div_y_mean = torch.mean(div_y, dim=(2, 3), keepdim=True)

            # Damp proportionally to the local and mean divergence.
            v[:, 0:1, :, 3:-3] -= 0.1 * div_x_mean * div_x
            v[:, 1:2, 3:-3, :] -= 0.1 * div_y_mean * div_y

        return v

def blockUNet(in_c, out_c, name, transposed=False, bn=True, relu=True, size=4, pad=1, dropout=0.):
    """Build one named U-Net stage as an ``nn.Sequential``.

    Layer order: activation -> (strided conv | bilinear upsample + conv)
    -> optional BatchNorm2d -> optional Dropout2d.

    Args:
        in_c, out_c: input/output channel counts.
        name: prefix used for the sub-module names.
        transposed: False = downsample with a stride-2 conv;
            True = bilinear x2 upsample followed by a (size-1) conv.
        bn: append BatchNorm2d when True.
        relu: ReLU(inplace) when True, LeakyReLU(0.2) otherwise.
        size: conv kernel size (the upsampling path uses size-1).
        pad: conv padding.
        dropout: Dropout2d probability; 0 disables the dropout layer.
    """
    stage = nn.Sequential()
    if relu:
        stage.add_module('%s_relu' % name, nn.ReLU(inplace=True))
    else:
        stage.add_module('%s_leakyrelu' % name, nn.LeakyReLU(0.2, inplace=True))
    if transposed:
        stage.add_module('%s_upsam' % name,
                         nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False))
        stage.add_module('%s_tconv' % name,
                         nn.Conv2d(in_c, out_c, kernel_size=size - 1, stride=1, padding=pad, bias=True))
    else:
        stage.add_module('%s_conv' % name,
                         nn.Conv2d(in_c, out_c, kernel_size=size, stride=2, padding=pad, bias=True))
    if bn:
        stage.add_module('%s_bn' % name, nn.BatchNorm2d(out_c))
    if dropout > 0.:
        stage.add_module('%s_dropout' % name, nn.Dropout2d(dropout, inplace=True))
    return stage

class TurbNetG(nn.Module):
    """U-Net style generator for flow-field prediction.

    The 4-channel input is augmented with a learned flow mask, encoded
    through six downsampling stages (CBAM attention after stage 4), then
    decoded with skip connections. Two heads emit the pressure field
    (1 channel) and velocity field (2 channels); the velocity passes
    through a learned physics constraint before the final concatenation.
    """

    def __init__(self, channelExponent=7, dropout=0.):
        super(TurbNetG, self).__init__()
        channels = int(2 ** channelExponent + 0.5)  # base width, e.g. 128 for exponent 7

        self.flow_enc = FlowEncoder()
        self.cbam4 = CBAM(channels * 8)
        self.phys_constraint = PhysicsConstraint()

        # Encoder entry: 5 input channels = 4 data channels + flow mask.
        self.layer1 = nn.Sequential(
            nn.Conv2d(5, channels, 4, 2, 1, bias=True),
            nn.LeakyReLU(0.2)
        )

        self.layer2 = blockUNet(channels, channels*2, 'layer2', False, True, False, dropout=dropout)
        self.layer2b = blockUNet(channels*2, channels*2, 'layer2b', False, True, False, dropout=dropout)
        self.layer3 = blockUNet(channels*2, channels*4, 'layer3', False, True, False, dropout=dropout)
        self.layer4 = blockUNet(channels*4, channels*8, 'layer4', False, True, False, size=4, dropout=dropout)
        self.layer5 = blockUNet(channels*8, channels*8, 'layer5', False, True, False, size=2, pad=0, dropout=dropout)
        self.layer6 = blockUNet(channels*8, channels*8, 'layer6', False, False, False, size=2, pad=0, dropout=dropout)

        # Decoder: each stage consumes the previous output concatenated with
        # the matching encoder feature map (hence the doubled input widths).
        self.dlayer6 = blockUNet(channels*8, channels*8, 'dlayer6', True, True, True, dropout=dropout, size=2, pad=0)
        self.dlayer5 = blockUNet(channels*16, channels*8, 'dlayer5', True, True, True, dropout=dropout, size=2, pad=0)
        self.dlayer4 = blockUNet(channels*16, channels*4, 'dlayer4', True, True, True, dropout=dropout)
        self.dlayer3 = blockUNet(channels*8, channels*2, 'dlayer3', True, True, True, dropout=dropout)
        self.dlayer2b = blockUNet(channels*4, channels*2, 'dlayer2b', True, True, True, dropout=dropout)
        self.dlayer2 = blockUNet(channels*4, channels, 'dlayer2', True, True, True, size=4, pad=1, dropout=dropout)

        # Output heads, both fed from the final skip-concatenated features.
        self.pressure_head = nn.Sequential(
            nn.Conv2d(channels*2, 32, 3, padding=1),
            nn.BatchNorm2d(32),
            nn.ReLU(),
            nn.Conv2d(32, 1, 1)
        )

        self.dlayer1 = nn.Sequential(
            nn.ReLU(inplace=True),
            nn.Conv2d(channels*2, 2, kernel_size=3, padding=1)
        )

    def forward(self, x, mask=None):
        """Return a [B, 3, H, W] tensor: pressure (1 ch) + corrected velocity (2 ch)."""
        # Augment the raw input with the learned flow mask.
        features = torch.cat([x, self.flow_enc(x)], 1)

        # Encoder path; attention is applied after the 4th stage.
        enc1 = self.layer1(features)
        enc2 = self.layer2(enc1)
        enc2b = self.layer2b(enc2)
        enc3 = self.layer3(enc2b)
        enc4 = self.cbam4(self.layer4(enc3))
        enc5 = self.layer5(enc4)
        enc6 = self.layer6(enc5)

        # Decoder path with skip connections.
        dec6 = self.dlayer6(enc6)
        dec5 = self.dlayer5(torch.cat([dec6, enc5], 1))
        dec4 = self.dlayer4(torch.cat([dec5, enc4], 1))
        dec3 = self.dlayer3(torch.cat([dec4, enc3], 1))
        dec2b = self.dlayer2b(torch.cat([dec3, enc2b], 1))
        dec2 = self.dlayer2(torch.cat([dec2b, enc2], 1))
        head_in = torch.cat([dec2, enc1], 1)

        # Bring the decoder output back to the input resolution.
        head_in = F.interpolate(head_in, size=x.shape[-2:], mode='bilinear', align_corners=False)

        assert head_in.shape[-2:] == x.shape[-2:], f"输出尺寸{head_in.shape}与输入尺寸{x.shape}不匹配"

        p_pred = self.pressure_head(head_in)
        v_pred = self.dlayer1(head_in)

        # Re-interpolate so both heads match the input size exactly.
        p_pred = F.interpolate(p_pred, size=x.shape[-2:], mode='bilinear', align_corners=False)
        v_pred = F.interpolate(v_pred, size=x.shape[-2:], mode='bilinear', align_corners=False)

        assert p_pred.shape == (x.size(0), 1, x.size(2), x.size(3)), "压力场尺寸错误"
        assert v_pred.shape == (x.size(0), 2, x.size(2), x.size(3)), "速度场尺寸错误"

        # Physics-based correction of the velocity field, then concat.
        v_corrected = self.phys_constraint(p_pred, v_pred, mask)
        return torch.cat([p_pred, v_corrected], 1)