from __future__ import print_function
import os
import paddle
from paddle.vision.models import mobilenet_v1
import paddle.vision.transforms as T
from paddle.static import InputSpec
from paddleslim.dygraph import L1NormFilterPruner
from paddleslim.analysis import dygraph_flops

# ===================== Configuration =====================
# Adjust freely to suit the experiment.
BATCH_SIZE = 128
EPOCHS = 20
LEARNING_RATE = 0.1
INPUT_SIZE = 32          # CIFAR10 image size (smaller than MobileNet's default 224)
PRUNE_RATIO = 0.4
SEN_FILE = "./sen.pickle"
USE_GPU = True

def set_device(use_gpu=True):
    """Select the compute device: GPU when requested and CUDA is built in, else CPU."""
    if use_gpu and paddle.is_compiled_with_cuda():
        dev = 'gpu'
    else:
        dev = 'cpu'
    paddle.set_device(dev)
    print(f"Using device: {dev}")

def get_model(pretrained=False):
    """Build a MobileNetV1 classifier adapted to CIFAR10 (10 classes)."""
    # MobileNet defaults to 224x224 ImageNet input; only num_classes is
    # changed here — the conv stack accepts the smaller input as-is.
    return mobilenet_v1(num_classes=10, pretrained=pretrained)

def get_transforms():
    """Return the preprocessing pipeline: tensor conversion plus normalization."""
    steps = [
        T.ToTensor(),
        # Maps pixel values into [-1, 1]; add further augmentation here if needed.
        T.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
    ]
    return T.Compose(steps)

def get_datasets(transform):
    """Load the CIFAR10 train/test splits with `transform` applied to every sample."""
    common = dict(backend="cv2", transform=transform)
    train_dataset = paddle.vision.datasets.Cifar10(mode="train", **common)
    val_dataset = paddle.vision.datasets.Cifar10(mode="test", **common)
    print(f'train samples count: {len(train_dataset)}')
    print(f'val samples count: {len(val_dataset)}')
    return train_dataset, val_dataset

def check_batches(dataset):
    """Sanity-check the dataset by printing the first sample's shape and label."""
    first = dataset[0]
    img, label = first
    print(f'image shape: {img.shape}; label: {label}')

def build_model(net, input_size):
    """Wrap `net` in a paddle.Model with explicit image/label input specs.

    `input_size` is the spatial side length (height == width) of the images.
    """
    image_spec = InputSpec([None, 3, input_size, input_size], 'float32', name='image')
    label_spec = InputSpec([None, 1], 'int64', name='label')
    return paddle.Model(net, [image_spec], [label_spec])

def train_model(model, train_dataset, val_dataset, lr, epochs, batch_size, msg=''):
    """Train `model` on `train_dataset`, then evaluate on `val_dataset`.

    Returns the metrics dict produced by `model.evaluate`.
    """
    opt = paddle.optimizer.Momentum(
        learning_rate=lr,
        parameters=model.network.parameters(),
    )
    # Cross-entropy loss, tracking top-1 and top-5 accuracy.
    model.prepare(
        opt,
        paddle.nn.CrossEntropyLoss(),
        paddle.metric.Accuracy(topk=(1, 5)),
    )
    print(f"\n==== {msg} Training for {epochs} epochs ====")
    model.fit(train_dataset, epochs=epochs, batch_size=batch_size, verbose=1)
    res = model.evaluate(val_dataset, batch_size=batch_size, log_freq=10)
    print(f"{msg} Val results: {res}")
    return res

def prune_and_finetune(model, net, val_dataset, batch_size, prune_ratio, input_shape, sen_file, msg=''):
    """Run sensitivity-guided L1-norm filter pruning on `net` in place.

    Args:
        model: paddle.Model wrapping `net` (already `prepare()`d), used to
            evaluate accuracy during sensitivity analysis.
        net: the underlying dygraph network to prune.
        val_dataset: dataset used for the sensitivity evaluations.
        batch_size: evaluation batch size.
        prune_ratio: target ratio of FLOPs to prune.
        input_shape: e.g. [1, 3, H, W], used for tracing and FLOPs counting.
        sen_file: pickle path where sensitivity results are cached.
        msg: unused tag kept for interface compatibility.

    Returns:
        The PruningPlan produced by `sensitive_prune`.
    """
    pruner = L1NormFilterPruner(net, input_shape)

    def eval_fn():
        # Top-1 accuracy is the signal the sensitivity analysis optimizes.
        val_res = model.evaluate(val_dataset, batch_size=batch_size)
        return val_res['acc_top1']

    if not os.path.exists(sen_file):
        print("Running sensitivity analysis (may take time)...")
    # BUG FIX: the original called pruner.sensitive() with no arguments after
    # the existence check, so a pre-existing cache file was never loaded and
    # `sens` came back empty. `sensitive()` loads from `sen_file` when it
    # exists and otherwise runs the analysis and saves to it — one call
    # covers both paths.
    sens = pruner.sensitive(eval_func=eval_fn, sen_file=sen_file)
    print(f"Sensitivity info loaded. Example: {list(sens.items())[:3]}")

    # FLOPs before/after pruning for reporting.
    flops_before = dygraph_flops(net, input_shape)
    # NOTE(review): the skipped variable name is model-specific — confirm
    # "conv2d_26.w_0" matches the final conv layer of this mobilenet_v1 build.
    plan = pruner.sensitive_prune(prune_ratio, skip_vars=["conv2d_26.w_0"])
    flops_after = dygraph_flops(net, input_shape)
    print(f"FLOPs before pruning: {flops_before}")
    print(f"FLOPs after  pruning: {flops_after}")
    print(f"Pruned FLOPs Ratio: {round(plan.pruned_flops * 100, 2)}%")
    return plan

# ============ Entry point ============
def main():
    """End-to-end pipeline: train MobileNetV1 on CIFAR10, prune it, fine-tune."""
    set_device(USE_GPU)
    net = get_model(pretrained=True)
    paddle.summary(net, (1, 3, INPUT_SIZE, INPUT_SIZE))

    train_dataset, val_dataset = get_datasets(get_transforms())
    check_batches(train_dataset)

    model = build_model(net, INPUT_SIZE)

    # ----------- 1. Train the original model -----------
    train_model(model, train_dataset, val_dataset,
                LEARNING_RATE, EPOCHS, BATCH_SIZE, "Original")

    # ----------- 2. Prune (L1 norm) -----------
    plan = prune_and_finetune(model, net, val_dataset, BATCH_SIZE, PRUNE_RATIO,
                              [1, 3, INPUT_SIZE, INPUT_SIZE], SEN_FILE)

    # ----------- 3. Fine-tune the pruned model -----------
    print("\n== Fine-tuning After Pruning ==")
    train_model(model, train_dataset, val_dataset,
                LEARNING_RATE, EPOCHS, BATCH_SIZE, "FT")

if __name__ == '__main__':
    main()