
import torch
import torch.nn as nn
import torch.nn.functional as F
import os
class regressionModule(torch.nn.Module):
    """Regress 10 ellipse parameters (pupil + iris) from encoder features.

    Output layout per sample, dim=1:
        [0:2]  pupil center   (tanh, in [-1, 1])
        [2:4]  pupil params   (sigmoid, in [0, 1])
        [4]    pupil angle    (unbounded)
        [5:7]  iris center    (tanh, in [-1, 1])
        [7:9]  iris params    (sigmoid, in [0, 1])
        [9]    iris angle     (unbounded)
    """
    def __init__(self, sizes):
        super(regressionModule, self).__init__()
        # Channel count of the deepest encoder stage feeds the first conv.
        inChannels = sizes['enc']['op'][-1]
        # NOTE(review): attribute is named max_pool but is average pooling;
        # name kept so existing state_dicts/callers keep working.
        self.max_pool = nn.AvgPool2d(kernel_size=2)

        self.c1 = nn.Conv2d(in_channels=inChannels,
                            out_channels=128,
                            bias=True,
                            kernel_size=(2,3))

        self.c2 = nn.Conv2d(in_channels=128,
                            out_channels=128,
                            bias=True,
                            kernel_size=3)

        self.c3 = nn.Conv2d(in_channels=128,
                            out_channels=32,
                            kernel_size=3,
                            bias=False)

        # 32 channels * 3 * 5 spatial — assumes the encoder feature map is
        # 15x20 so the conv/pool chain ends at 3x5; TODO confirm with caller.
        self.l1 = nn.Linear(32*3*5, 256, bias=True)
        self.l2 = nn.Linear(256, 10, bias=True)

        self.c_actfunc = torch.tanh # Center has to be between -1 and 1
        self.param_actfunc = torch.sigmoid # Parameters can't be negative and capped to 1

    def forward(self, x):
        B = x.shape[0]
        # For a [B, C, 15, 20] input the spatial sizes evolve as commented.
        x = F.leaky_relu(self.c1(x)) # [B, 128, 14, 18]
        x = self.max_pool(x) # [B, 128, 7, 9]
        x = F.leaky_relu(self.c2(x)) # [B, 128, 5, 7]
        x = F.leaky_relu(self.c3(x)) # [B, 32, 3, 5]
        # BUG FIX: was `x.x.view(...)`, which raises AttributeError on a Tensor.
        x = x.view(B, 32 * 3 * 5)
        x = self.l2(torch.selu(self.l1(x)))

        pup_c = self.c_actfunc(x[:, 0:2])
        pup_param = self.param_actfunc(x[:, 2:4])
        pup_angle = x[:, 4]
        iri_c = self.c_actfunc(x[:, 5:7])
        iri_param = self.param_actfunc(x[:, 7:9])
        iri_angle = x[:, 9]

        # Reassemble in the documented order as a [B, 10] tensor.
        op = torch.cat([pup_c,
                        pup_param,
                        pup_angle.unsqueeze(1),
                        iri_c,
                        iri_param,
                        iri_angle.unsqueeze(1)], dim=1)
        return op

class convBlock(nn.Module):
    """Two 3x3 convolutions; BatchNorm and the activation follow the second.

    padding=1 on both convs keeps the spatial size unchanged.
    """
    def __init__(self, in_c, inter_c, out_c, actfunc):
        super(convBlock, self).__init__()
        self.conv1 = nn.Conv2d(in_c, inter_c, kernel_size=3, padding=1)
        self.conv2 = nn.Conv2d(inter_c, out_c, kernel_size=3, padding=1)
        self.actfunc = actfunc
        self.bn = torch.nn.BatchNorm2d(num_features=out_c)

    def forward(self, x):
        # conv1 -> act -> conv2 -> BN -> act
        feat = self.actfunc(self.conv1(x))
        feat = self.bn(self.conv2(feat))
        return self.actfunc(feat)

@torch.jit.ignore
def getSizes(chz, growth, blks=4):
    """Return the channel-size table used by the encoder and decoder.

    NOTE(review): chz, growth and blks are currently ignored — every size
    below is hard-coded; arguments kept for caller compatibility.
    """
    enc_sizes = {
        'inter': [32, 64, 96, 128],
        'ip': [32, 38, 76, 115],
        'op': [38, 76, 115, 153],
    }
    dec_sizes = {
        'skip': [243, 172, 102, 64],
        'ip': [153, 115, 76, 38],
        'op': [115, 76, 38, 32],
    }
    return {'enc': enc_sizes, 'dec': dec_sizes}

class Transition_down(nn.Module):
    """Norm -> activation -> 1x1 conv, then optional average-pool downsampling."""
    def __init__(self, in_c, out_c, down_size, norm, actfunc):
        super(Transition_down, self).__init__()
        self.conv = nn.Conv2d(in_c, out_c, kernel_size=1, padding=0)
        # A falsy down_size (e.g. 0) disables pooling entirely.
        # NOTE(review): named max_pool but performs average pooling; name kept
        # for state_dict/caller compatibility.
        self.max_pool = nn.AvgPool2d(kernel_size=down_size) if down_size else nn.Identity()
        self.norm = norm(num_features=in_c)
        self.actfunc = actfunc

    def forward(self, x):
        normalized = self.actfunc(self.norm(x))
        return self.max_pool(self.conv(normalized))

class DenseNet2D_down_block(nn.Module):
    """Dense block of three conv stages followed by a transition-down head.

    forward returns (dense_features, downsampled): the former feeds the
    decoder skip connection, the latter the next encoder stage.
    """
    def __init__(self, in_c, inter_c, op_c, down_size, norm, actfunc):
        super(DenseNet2D_down_block, self).__init__()
        self.conv1 = nn.Conv2d(in_c, inter_c, kernel_size=3, padding=1)
        self.conv21 = nn.Conv2d(in_c+inter_c, inter_c, kernel_size=1, padding=0)
        self.conv22 = nn.Conv2d(inter_c, inter_c, kernel_size=3, padding=1)
        self.conv31 = nn.Conv2d(in_c+2*inter_c, inter_c, kernel_size=1, padding=0)
        self.conv32 = nn.Conv2d(inter_c, inter_c, kernel_size=3, padding=1)
        self.actfunc = actfunc
        self.bn = norm(num_features=in_c)
        self.TD = Transition_down(inter_c+in_c, op_c, down_size, norm, actfunc)

    def forward(self, x):
        # Stage 1: norm -> 3x3 conv on the raw input.
        stage1 = self.actfunc(self.conv1(self.bn(x)))
        # Stage 2: 1x1 bottleneck then 3x3 conv over [x, stage1].
        concat1 = torch.cat([x, stage1], dim=1)
        stage2 = self.actfunc(self.conv22(self.conv21(concat1)))
        # Stage 3: same pattern over [x, stage1, stage2].
        concat2 = torch.cat([concat1, stage2], dim=1)
        stage3 = self.actfunc(self.conv32(self.conv31(concat2)))
        # Dense output re-appends the block input.
        dense = torch.cat([stage3, x], dim=1)
        return dense, self.TD(dense)

class DenseNet2D_up_block(nn.Module):
    """Upsample, concatenate the encoder skip, then two dense conv stages."""
    def __init__(self, skip_c, in_c, out_c, up_stride, actfunc):
        super(DenseNet2D_up_block, self).__init__()
        self.conv11 = nn.Conv2d(skip_c+in_c, out_c, kernel_size=1, padding=0)
        self.conv12 = nn.Conv2d(out_c, out_c, kernel_size=3, padding=1)
        self.conv21 = nn.Conv2d(skip_c+in_c+out_c, out_c, kernel_size=1, padding=0)
        self.conv22 = nn.Conv2d(out_c, out_c, kernel_size=3, padding=1)
        self.actfunc = actfunc
        self.up_stride = int(up_stride)
        self.upsample = nn.Upsample(scale_factor=self.up_stride, mode='nearest')

    def forward(self, prev_feature_map, x):
        # Nearest-neighbour upsampling so x matches the skip map's resolution.
        merged = torch.cat([self.upsample(x), prev_feature_map], dim=1)
        # Stage 1: 1x1 bottleneck + 3x3 conv.
        first = self.actfunc(self.conv12(self.conv11(merged)))
        # Stage 2: same pattern over [merged, first].
        stage2_in = torch.cat([merged, first], dim=1)
        return self.actfunc(self.conv22(self.conv21(stage2_in)))

class DenseNet_encoder(nn.Module):
    """DenseNet-style encoder: stem convBlock, four down blocks, bottleneck.

    forward returns (skip_4, skip_3, skip_2, skip_1, bottleneck_features),
    i.e. the skip maps ordered deepest-first for the decoder.
    """
    def __init__(self, in_c=1, chz=32, actfunc=F.leaky_relu, growth=1.5, norm=nn.BatchNorm2d):
        super(DenseNet_encoder, self).__init__()
        sizes = getSizes(chz, growth)
        interSize = sizes['enc']['inter']
        opSize = sizes['enc']['op']
        ipSize = sizes['enc']['ip']

        # BUG FIX: the stem previously hard-coded in_c=1, silently ignoring
        # the in_c argument; multi-channel inputs would have crashed.
        # Backward compatible since the default is in_c=1.
        self.head = convBlock(in_c=in_c,
                              inter_c=chz,
                              out_c=chz,
                              actfunc=actfunc)
        self.down_block1 = DenseNet2D_down_block(in_c=ipSize[0],
                                                 inter_c=interSize[0],
                                                 op_c=opSize[0],
                                                 down_size=2,
                                                 norm=norm,
                                                 actfunc=actfunc)
        self.down_block2 = DenseNet2D_down_block(in_c=ipSize[1],
                                                 inter_c=interSize[1],
                                                 op_c=opSize[1],
                                                 down_size=2,
                                                 norm=norm,
                                                 actfunc=actfunc)
        self.down_block3 = DenseNet2D_down_block(in_c=ipSize[2],
                                                 inter_c=interSize[2],
                                                 op_c=opSize[2],
                                                 down_size=2,
                                                 norm=norm,
                                                 actfunc=actfunc)
        self.down_block4 = DenseNet2D_down_block(in_c=ipSize[3],
                                                 inter_c=interSize[3],
                                                 op_c=opSize[3],
                                                 down_size=2,
                                                 norm=norm,
                                                 actfunc=actfunc)
        # down_size=0 -> no pooling in the transition: pure bottleneck.
        self.bottleneck = DenseNet2D_down_block(in_c=opSize[3],
                                                inter_c=interSize[3],
                                                op_c=opSize[3],
                                                down_size=0,
                                                norm=norm,
                                                actfunc=actfunc)

    def forward(self, x):
        """x: [B, in_c, H, W] -> deepest-first skips plus bottleneck output.

        Each down block halves the spatial resolution; the bottleneck keeps it.
        """
        x = self.head(x)
        skip_1, x = self.down_block1(x)
        skip_2, x = self.down_block2(x)
        skip_3, x = self.down_block3(x)
        skip_4, x = self.down_block4(x)
        _, x = self.bottleneck(x)  # bottleneck skip output is unused
        return skip_4, skip_3, skip_2, skip_1, x

class DenseNet_decoder(nn.Module):
    """DenseNet-style decoder: four up blocks consuming the encoder skips,
    then a final convBlock that emits ``out_c`` channels.

    NOTE(review): the ``norm`` argument is currently unused here.
    """
    def __init__(self, chz, out_c, growth, actfunc=F.leaky_relu, norm=nn.BatchNorm2d):
        super(DenseNet_decoder, self).__init__()
        sizes = getSizes(chz, growth)
        skipSize = sizes['dec']['skip']
        opSize = sizes['dec']['op']
        ipSize = sizes['dec']['ip']

        self.up_block4 = DenseNet2D_up_block(skipSize[0], ipSize[0], opSize[0], 2, actfunc)
        self.up_block3 = DenseNet2D_up_block(skipSize[1], ipSize[1], opSize[1], 2, actfunc)
        self.up_block2 = DenseNet2D_up_block(skipSize[2], ipSize[2], opSize[2], 2, actfunc)
        self.up_block1 = DenseNet2D_up_block(skipSize[3], ipSize[3], opSize[3], 2, actfunc)
        self.final = convBlock(chz, chz, out_c, actfunc)

    def forward(self, skip4, skip3, skip2, skip1, x):
        # Walk back up the pyramid, consuming the deepest skip first; each up
        # block doubles the spatial resolution.
        x = self.up_block4(skip4, x)
        x = self.up_block3(skip3, x)
        x = self.up_block2(skip2, x)
        x = self.up_block1(skip1, x)
        return self.final(x)

class DenseNet2D(nn.Module):
    """Encoder/decoder network mapping a 1-channel image batch to a
    3-channel output map at the input resolution."""
    def __init__(self,
                 chz=32,
                 growth=1.2,
                 actfunc=F.leaky_relu,
                 norm=nn.InstanceNorm2d,
                 selfCorr=False,
                 disentangle=False):
        super(DenseNet2D, self).__init__()

        self.sizes = getSizes(chz, growth)

        # Training-time flags; kept for checkpoint/caller compatibility.
        self.toggle = True
        self.selfCorr = selfCorr
        self.disentangle = disentangle
        self.disentangle_alpha = 2

        self.enc = DenseNet_encoder(in_c=1, chz=chz, actfunc=actfunc, growth=growth, norm=norm)
        self.dec = DenseNet_decoder(chz=chz, out_c=3, actfunc=actfunc, growth=growth, norm=norm)
        # self.elReg = regressionModule(self.sizes)  # ellipse regression head (disabled)

    def forward(self, x):
        """x: [B, 1, H, W] input batch -> [B, 3, H, W] output map."""
        skip4, skip3, skip2, skip1, bottom = self.enc(x)
        return self.dec(skip4, skip3, skip2, skip1, bottom)

def load_model(model_path, device):
    """Build a DenseNet2D, load weights from ``model_path``, set eval mode.

    Checkpoint keys belonging to the (disabled) ``elReg`` regression head
    are filtered out before loading.

    Raises:
        FileNotFoundError: if ``model_path`` does not exist.
    """
    if not os.path.exists(model_path):
        raise FileNotFoundError(f"Model file {model_path} does not exist.")
    model = DenseNet2D().to(device)
    # NOTE(review): torch.load unpickles arbitrary objects — only load
    # checkpoints from trusted sources.
    state_dict = torch.load(model_path, map_location=device)
    filtered_state_dict = {
        key: value
        for key, value in state_dict.items()
        if not key.startswith('elReg')
    }
    model.load_state_dict(filtered_state_dict)
    model.eval()  # inference mode (disables dropout / fixes norm stats)
    return model
import torch
import pandas as pd

def analyze_model_parameters(model):
    """Collect min/max/mean/std statistics for every trainable parameter.

    Returns a pandas DataFrame with one row per parameter tensor and
    columns: layer, param_name, min, max, mean, std.
    """
    rows = []
    for name, param in model.named_parameters():
        if not param.requires_grad:
            continue
        # Analyse on the CPU regardless of where the model lives.
        values = param.data.cpu().numpy()
        parts = name.split('.')
        # Drop the trailing ".weight" / ".bias" to recover the layer name.
        layer = '.'.join(parts[:-1]) if len(parts) > 1 else name
        rows.append({
            'layer': layer,
            'param_name': name,
            'min': values.min(),
            'max': values.max(),
            'mean': values.mean(),
            'std': values.std(),
        })
    return pd.DataFrame(rows)

def quantiazation(model):
    # TODO: model quantization is not implemented yet.
    # NOTE(review): function name contains a typo ("quantiazation") — kept
    # as-is so any existing callers don't break.
    pass

def replace_instance_norm_with_batch_norm(module):
    """Recursively swap every nn.InstanceNorm2d child for an nn.BatchNorm2d.

    Affine weight/bias are copied when present. InstanceNorm2d keeps no
    running statistics, so the new BatchNorm2d starts from the identity
    (mean 0, var 1). Replacement happens in place on ``module``.
    """
    for name, child in module.named_children():
        if not isinstance(child, nn.InstanceNorm2d):
            # Recurse into composite submodules.
            replace_instance_norm_with_batch_norm(child)
            continue
        # Mirror the InstanceNorm2d configuration.
        bn = nn.BatchNorm2d(
            num_features=child.num_features,
            eps=child.eps,
            momentum=child.momentum,
            affine=child.affine,
            track_running_stats=True  # inference needs running stats
        )
        if child.affine:
            bn.weight.data = child.weight.data.clone()
            bn.bias.data = child.bias.data.clone()
        # Identity initialisation for the (missing) running statistics.
        bn.running_mean.zero_()
        bn.running_var.fill_(1)
        setattr(module, name, bn)

if __name__ == "__main__":
    # Smoke-test script: load a checkpoint, run one forward pass, and time
    # the model over repeated inferences.
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # NOTE(review): hard-coded checkpoint path — adjust for your environment.
    model_path = '/home/wtpan/wcmx/model_dict/newglass_test_7.pth'
    model = load_model(model_path, device)
    model.eval()

    #replace_instance_norm_with_batch_norm(model)

    #df = analyze_model_parameters(model)

#    save as a CSV file
    #df.to_csv('model_parameter_stats.csv', index=False)
    #torch.quantization.fuse_modules(model, [['dec.final.conv2', 'dec.final.bn'], ['enc.head.conv2','enc.head.bn']], inplace=True)
    #print(model)
    # netDict = torch.load('/home/wtpan/wcmx/EllSeg/weights/riteyes.git_ok')
    # model=DenseNet2D()
    # model.load_state_dict(netDict['state_dict'], strict=True)
    # model = model.to(device)
    
    # scripted_model = torch.jit.script(model)
    # torch.jit.save(scripted_model, "scripted_model.pt")
    # print("TorchScript model saved.")

    # dummy_input = torch.randn(1, 1, 240, 240).to(device).to(torch.float32)
    # result=model(dummy_input)
    # traced_model = torch.jit.trace(model, dummy_input)
    # torch.jit.save(traced_model, "traced_model.pt")
    # Measure model inference time.
    dummy_input = torch.randn(1, 1, 240, 240).to(device).to(torch.float32)
    # flops = FlopCountAnalysis(model,dummy_input)

    # # print the total FLOPs count
    # print(f"FLOPs: {flops.total()}")
    # # measure model runtime
    result = model(dummy_input)
    print(result.shape)
    import time
    start_time= time.time()
    with torch.no_grad():
        for _ in range(100):  # average over several runs
            result = model(dummy_input)
    end_time = time.time()
    print(f"模型运行时间: {(end_time - start_time) /100:.4f} 秒")