import torch
import torchvision.models as models

# Build a VGG16 backbone without downloading ImageNet weights; our own
# fine-tuned checkpoint is loaded below.
model = models.vgg16(pretrained=False)

# Replace the last classifier layer to match the 7-class training setup.
model.classifier[6] = torch.nn.Linear(4096, 7)

# Load the trained weights. map_location='cpu' makes the load robust on
# CPU-only machines even if the checkpoint was saved during a GPU run.
# NOTE(review): torch.load unpickles arbitrary objects — only load trusted
# checkpoints (or pass weights_only=True on torch >= 1.13).
model.load_state_dict(
    torch.load(
        r'D:\develop\PythonCode\python基础\附_项目实战\九_薄膜图片级别分类\xgm\lab2_82\vgg16_best_model.pth',
        map_location='cpu',
    )
)

# Evaluation mode: disables dropout so the extracted weights reflect
# inference-time behavior.
model.eval()


def format_number(number):
    """Render *number* as a fixed-point string with exactly 3 decimals."""
    return format(number, ".3f")

def extract_conv_layer_formula(layer, layer_index):
    """Render each output channel of a Conv2d layer as an explicit
    weighted-sum formula string.

    Args:
        layer: a ``torch.nn.Conv2d`` whose weight (and optional bias) is read.
        layer_index: integer label used for the ``X^l`` / ``Y^l`` symbols.

    Returns:
        list[str]: one ``"Y^l[i] = ReLU(...)"`` formula per output channel,
        with every coefficient printed to 3 decimal places.
    """
    # detach().cpu() works for CUDA tensors and tensors that require grad;
    # the old `.data.numpy()` form silently bypasses autograd and fails on GPU.
    W = layer.weight.detach().cpu().numpy()
    out_channels, in_channels = W.shape[0], W.shape[1]

    # Conv2d can be built with bias=False; fall back to a zero bias so the
    # emitted formula stays well-formed instead of raising AttributeError.
    if layer.bias is not None:
        b = layer.bias.detach().cpu().numpy()
    else:
        b = [0.0] * out_channels

    formulas = []
    for i in range(out_channels):
        terms = []
        for j in range(in_channels):
            # n indexes the flattened kxk spatial position of the kernel.
            for n, k in enumerate(W[i, j].flatten()):
                terms.append(f"{k:.3f} * X^{layer_index}[{j}][{n}]")
        formulas.append(
            f"Y^{layer_index}[{i}] = ReLU({' + '.join(terms)} + {b[i]:.3f})"
        )

    return formulas

def extract_fc_layer_formula(layer, layer_index):
    """Render each output unit of a Linear layer as an explicit
    weighted-sum formula string.

    Args:
        layer: a ``torch.nn.Linear`` whose weight (and optional bias) is read.
        layer_index: integer label for the output symbol ``O^l``; the input
            vector is written as ``Z^(l-1)``.

    Returns:
        list[str]: one ``"O^l[i] = ReLU(...)"`` formula per output unit,
        with every coefficient printed to 3 decimal places.
    """
    # detach().cpu() works for CUDA tensors and tensors that require grad;
    # the old `.data.numpy()` form silently bypasses autograd and fails on GPU.
    W = layer.weight.detach().cpu().numpy()
    output_dim, input_dim = W.shape[0], W.shape[1]

    # Linear can be built with bias=False; fall back to a zero bias so the
    # emitted formula stays well-formed instead of raising AttributeError.
    if layer.bias is not None:
        b = layer.bias.detach().cpu().numpy()
    else:
        b = [0.0] * output_dim

    formulas = []
    for i in range(output_dim):
        terms = [
            f"{W[i, j]:.3f} * Z^{layer_index - 1}[{j}]"
            for j in range(input_dim)
        ]
        formulas.append(
            f"O^{layer_index}[{i}] = ReLU({' + '.join(terms)} + {b[i]:.3f})"
        )

    return formulas

# Output file name (kept byte-identical: downstream readers expect it).
output_file = "vgg16_带权重公式.txt"

with open(output_file, "w", encoding='utf-8') as f:
    # Header and symbol legend for the generated formulas.
    f.write("VGG16 模型数学表达式（包含具体权重）\n")
    f.write("======================================\n\n")
    f.write("输入说明:\n")
    f.write("X^l: 输入到第 l 层的特征图（可以有多个通道）。\n")
    f.write("W^l: 第 l 层卷积或全连接操作的权重。\n")
    f.write("b^l: 第 l 层的偏置项。\n")
    f.write("Y^l: 经过 ReLU 激活后的卷积层输出。\n")
    f.write("Z^l: 从最后一个卷积层或池化层展平的输出（向量），作为全连接层的输入。\n")
    f.write("O^l: 经过 ReLU 激活后的全连接层输出。\n")
    f.write("ReLU(x): 修正线性单元激活函数，输出为 max(0, x)。\n\n")

    f.write("卷积层:\n")
    f.write("---------------------\n")
    layer_counter = 0
    # Walk the feature extractor; only Conv2d layers emit formulas
    # (ReLU/MaxPool modules are skipped and do not advance the counter).
    for layer in model.features:
        if isinstance(layer, torch.nn.Conv2d):
            formulas = extract_conv_layer_formula(layer, layer_counter)
            for formula in formulas:
                f.write(f"层 {layer_counter}: {formula}\n")
            layer_counter += 1
            f.write("\n")

    f.write("全连接层:\n")
    f.write("-----------------------\n")
    # NOTE(review): `i` is the classifier-module index (0, 3, 6 for VGG16,
    # because of the interleaved ReLU/Dropout modules), so the FC layer
    # labels are NOT consecutive after layer_counter. Preserved to keep the
    # output identical — confirm this numbering is intended.
    for i, layer in enumerate(model.classifier):
        if isinstance(layer, torch.nn.Linear):
            formulas = extract_fc_layer_formula(layer, i + layer_counter)
            for formula in formulas:
                f.write(f"层 {i + layer_counter}: {formula}\n")
            f.write("\n")

print(f"公式和说明已保存到 {output_file}")
