import json
import torch
import os
import MNN
import random
import torch.nn as nn
import numpy as np
from PIL import Image
from tqdm import tqdm, trange
from torchvision import datasets, transforms
from torch.utils.data import DataLoader, Subset
from mnncompress.pytorch import low_rank_decompose
import multiprocessing
from onnxruntime.quantization import quantize_dynamic, QuantType
from ultralytics import YOLO
import onnx
from onnxruntime.quantization import quant_pre_process
import yaml
from Util.Return import get_config, get_path

# Shorthand handle for MNN's expression API; lazy evaluation defers computation
# of expressions until their values are read (see quant_func / quantize_mnn_model).
F = MNN.expr
F.lazy_eval(True)


class Compressor:
    """Dispatches model compression to the pipeline selected by ``algorithm_id``."""

    def __init__(self):
        pass

    def compress(self, model_input, model_output, config):
        """Compress ``model_input`` into ``model_output`` and return the evaluation dict.

        algorithm_id (from ``config``, default 3):
            1 - low-rank decomposition (+ evaluation); output may be .pth or .onnx
            2 - offline quantization (+ evaluation)
            3 - low-rank decomposition, then offline quantization (+ evaluation)
            4 - dynamic quantization of a YOLO model

        Raises ValueError for an unknown algorithm_id or an unsupported output format.
        """
        algorithm_id = config.get("algorithm_id", 3)  # which compression pipeline to run
        if algorithm_id == 1:
            # Low-rank decomposition + performance evaluation
            if model_output.endswith('.pth'):
                return LowRankDecompose(model_input, model_output, config).run(isEvaluate=True)
            elif model_output.endswith('.onnx'):
                # Decompose into a temporary .pth first, then convert that to ONNX.
                tmp_model_path = os.path.splitext(model_input)[0] + "_onnxTmp" + ".pth"
                result = LowRankDecompose(model_input, tmp_model_path, config).run(isEvaluate=True)
                LowRankDecompose.convert_pth_to_onnx(tmp_model_path, model_output, config.get("input_size"))
                os.remove(tmp_model_path)
                return result
            else:
                # Previously this case fell through and silently returned None.
                raise ValueError("algorithm_id 1 requires a .pth or .onnx output path.")
        elif algorithm_id == 2:
            # Offline quantization + performance evaluation
            return ModelQuantizer(model_input, model_output, config).run(isEvaluate=True)
        elif algorithm_id == 3:
            # Low-rank decomposition + offline quantization + performance evaluation
            tmp_model_path = os.path.splitext(model_input)[0] + "_tmp" + os.path.splitext(model_input)[1]
            # Intermediate file hands the decomposed model to the quantizer.
            LowRankDecompose(model_input, tmp_model_path, config).run(isEvaluate=False)
            try:
                return ModelQuantizer(tmp_model_path, model_output, config).run(isEvaluate=True)
            finally:
                # BUG FIX: the cleanup used to sit *after* the return and never ran,
                # leaking the temporary file.
                if os.path.exists(tmp_model_path):
                    os.remove(tmp_model_path)
        elif algorithm_id == 4:
            # Dynamic quantization of a YOLO model
            return YoloQuantization(model_input, model_output, config).quantYoloModel()
        else:
            # BUG FIX: the message previously omitted the valid id 4.
            raise ValueError("Invalid algorithm_id. Please choose 1, 2, 3, or 4.")


class YoloQuantization:
    """Dynamic (weight-only) 8-bit quantization of a YOLO model via ONNX Runtime."""

    def __init__(self, model_path, compressed_model_path, config):
        self.model_path = model_path  # path of the model to compress
        self.compressed_model_path = compressed_model_path  # where the compressed model is saved
        self.config = config  # externally supplied config (json dict)
        self.batch_size = config.get("yolo_batch_size", 8)  # batch size
        self.input_size = config.get("input_size", 640)  # image input size
        self.algorithm_id = config.get("algorithm_id", 4)  # algorithm id (4 == YOLO dynamic quant)
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")  # run device
        train_path = os.path.join(
            get_path(os.path.abspath(__file__), 2, get_config("model", "dataset_path")),
            str(config.get("dataset_id")),
            "images",
            "train"
        )  # training-set directory
        # The "train" images are not trained on here; they are reused as the
        # validation split for the before/after mAP comparison.
        nc = config.get("nc", 80)
        # BUG FIX: Ultralytics dataset YAML expects the key "names" (the original
        # "name" was a typo), and an empty list would fail its len(names) == nc
        # consistency check — generate placeholder class names instead.
        yamlDict = {
            "train": train_path,
            "val": train_path,
            "nc": nc,
            "names": [f"class_{i}" for i in range(nc)],
        }
        # Path of the temporary yaml dataset-description file
        yaml_file_path = train_path + ".yaml"
        self.yaml = yaml_file_path
        # Write the dataset description to the temporary yaml file.
        with open(yaml_file_path, 'w') as file:
            yaml.dump(yamlDict, file)

    def evaluateYoloModel(self, model_path1, model_path2, data_path, algorithm_id):
        """Compare mAP50 and file size of the YOLO model before/after compression."""
        def evaluate_model(model, yaml_path):
            """Validate the model and return its mAP50."""
            results = model.val(data=yaml_path, device=self.device, save=False)
            return results.box.map50  # mAP50

        # Load and evaluate the first (uncompressed) model
        model1 = YOLO(model_path1)
        pre_compression_acc = evaluate_model(model1, data_path)
        pre_compression_size = os.path.getsize(model_path1) / (1024 * 1024)

        # Load and evaluate the second (compressed) model
        model2 = YOLO(model_path2)
        compressed_acc = evaluate_model(model2, data_path)
        compressed_size = os.path.getsize(model_path2) / (1024 * 1024)
        # Size ratio after/before
        compression_ratio = compressed_size / pre_compression_size

        # Build the comparison result
        compress_result = {
            "compress_algorithm_id": algorithm_id,
            "pre-compression_map50": f"{pre_compression_acc:.2f}",
            "compressed_map50": f"{compressed_acc:.2f}",
            "pre-compression_size": f"{pre_compression_size:.2f}MB",
            "compressed_size": f"{compressed_size:.2f}MB",
            "compression_ratio": f"{1-compression_ratio:.2f}"
        }
        print(compress_result)
        return compress_result

    def quantYoloModel(self):
        """Export the .pt model to ONNX, dynamically quantize it, and evaluate."""
        model = YOLO(self.model_path)
        # export() returns the path of the produced .onnx file; prefer that over
        # reconstructing it from self.model_path (fall back just in case).
        tmp_model_path = model.export(format="onnx", imgsz=self.input_size) \
            or os.path.splitext(self.model_path)[0] + ".onnx"
        # Quantization pre-processing (shape inference/optimization), in place.
        # Passing the file path (rather than a loaded ModelProto) is supported by
        # all onnxruntime versions and makes the explicit onnx.load unnecessary.
        quant_pre_process(tmp_model_path, tmp_model_path)
        # Dynamic 8-bit weight quantization
        quantize_dynamic(tmp_model_path, self.compressed_model_path, weight_type=QuantType.QUInt8)
        # Evaluate before/after
        result = self.evaluateYoloModel(self.model_path, self.compressed_model_path, self.yaml, self.algorithm_id)
        # Remove the temporary onnx and yaml files
        os.remove(tmp_model_path)
        os.remove(self.yaml)
        return result


# Low-rank decomposition algorithm for classification models
class LowRankDecompose:
    """Compress a classification model via low-rank decomposition, then fine-tune it."""

    def __init__(self, model_path, compressed_model_path, config):
        config = json.loads(config) if isinstance(config, str) else config

        self.model_path = model_path  # path of the input (uncompressed) model
        self.compressed_model_path = compressed_model_path  # where the compressed model is saved
        self.dataset = os.path.join(get_path(os.path.abspath(__file__), 2, get_config("model", "dataset_path")),
                                    str(config.get("dataset_id")))  # dataset directory for this model
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")  # run device

        # Parameters may come from config; otherwise defaults apply.
        self.tucker_minimal_ratio = config.get("tucker_minimal_ratio", 0.5)  # minimal ratio for Tucker decomposition
        self.reserved_singular_value_ratio = config.get("reserved_singular_value_ratio",
                                                        0.7)  # fraction of singular values kept during compression
        self.epochs = config.get("lowrank_epochs", 3)  # fine-tuning epochs
        self.batch_size = config.get("lowrank_batch_size", 20)  # samples per training batch
        self.learning_rate = config.get("lowrank_learning_rate", 0.00001)  # learning rate (high values are unstable)
        self.num_workers = config.get("num_workers", 3)  # dataloader worker count
        self.image_segment_ration = config.get("image_segment_ration", 2)  # use 1/N of the dataset for training (1 == all)
        self.input_size = config.get("input_size", 224)  # image input size
        self.algorithm_id = config.get("algorithm_id", 1)  # algorithm id (1 == low-rank decomposition)
        self.sample_ratio = config.get("sample_ratio", 1)  # use 1/N of the dataset for validation (1 == all)
        self.model = None

    def load_model(self):
        # map_location lets a CUDA-saved checkpoint load on a CPU-only host
        self.model = torch.load(self.model_path, map_location=self.device)

    def compress_model(self):
        """Apply low-rank decomposition and save the compressed model."""
        self.model = low_rank_decompose(
            self.model,
            "compress_params.bin",
            tucker_minimal_ratio=self.tucker_minimal_ratio,
            reserved_singular_value_ratio=self.reserved_singular_value_ratio
        )
        torch.save(self.model, self.compressed_model_path)
        print("PyTorch 模型已成功保存至:", self.compressed_model_path)

    def load_ImageNet(self, path):
        """Build the training dataset with standard augmentation transforms."""
        return datasets.ImageFolder(
            path,
            transforms.Compose([
                transforms.Resize(self.input_size),  # scale to the configured input size
                transforms.RandomResizedCrop(self.input_size),
                transforms.RandomHorizontalFlip(),
                transforms.ToTensor(),
                transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
            ])
        )

    def load_test_data(self):
        """Build the test DataLoader (center-crop, no augmentation)."""
        return DataLoader(datasets.ImageFolder(self.dataset, transforms.Compose([
            transforms.Resize(int(self.input_size * 1.14)),  # resize to 1.14x before the center crop
            transforms.CenterCrop(self.input_size),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])
        ])), batch_size=self.batch_size, shuffle=False, num_workers=self.num_workers, pin_memory=True)

    def evaluate(self, model, test_loader, criterion):
        """Return (accuracy %, average loss) of *model* over *test_loader*."""
        model.eval()
        correct, total, running_loss = 0, 0, 0.0

        with torch.no_grad():
            for inputs, labels in test_loader:
                inputs, labels = inputs.to(self.device), labels.to(self.device)
                outputs = model(inputs)
                loss = criterion(outputs, labels)  # batch loss
                running_loss += loss.item()
                _, predicted = torch.max(outputs.data, 1)
                total += labels.size(0)  # total images processed
                correct += (predicted == labels).sum().item()  # correctly predicted images

        accuracy = 100 * correct / total
        average_loss = running_loss / len(test_loader)
        return accuracy, average_loss

    def run(self, isEvaluate):
        """Compress, fine-tune, and optionally evaluate the model.

        Returns the before/after comparison dict when ``isEvaluate`` is True,
        otherwise None. The best-accuracy checkpoint is kept at
        ``self.compressed_model_path``.
        """
        # Load and compress the model
        self.load_model()
        self.compress_model()
        # Load datasets
        train_dataset = self.load_ImageNet(self.dataset)
        test_loader = self.load_test_data()

        self.model.to(self.device)
        criterion = nn.CrossEntropyLoss()
        optimizer = torch.optim.Adam(self.model.parameters(), lr=self.learning_rate)
        best_accuracy = 0.0

        # Fine-tune the compressed model
        for epoch in range(self.epochs):
            print(f"Epoch {epoch + 1}/{self.epochs} start")
            total, correct = 0, 0
            # Use 1/image_segment_ration of the dataset per epoch
            sample_size = len(train_dataset) // self.image_segment_ration
            # Randomly sample that many training images
            train_subset = Subset(train_dataset, np.random.choice(len(train_dataset), sample_size, replace=False))
            train_loader = DataLoader(train_subset, batch_size=self.batch_size, shuffle=True,
                                      num_workers=self.num_workers, pin_memory=True)

            running_loss = 0.0
            progress_bar = tqdm(enumerate(train_loader), total=len(train_loader))
            for i, (inputs, labels) in progress_bar:
                inputs, labels = inputs.to(self.device), labels.to(self.device)
                optimizer.zero_grad()

                outputs = self.model(inputs)
                # Models such as Inception return an output structure whose main
                # logits live in .logits while training. BUG FIX: the original
                # checked isinstance(outputs, tuple) and then read .logits, which
                # crashes on a plain tuple; hasattr() is the safe check.
                if hasattr(outputs, "logits"):
                    outputs = outputs.logits  # use only the main output for the loss

                loss = criterion(outputs, labels)
                loss.backward()
                optimizer.step()
                running_loss += loss.item()
                # Display progress for the current epoch
                progress_bar.set_description(f"Epoch [{epoch + 1}/{self.epochs}] Loss: {running_loss / (i + 1):.4f}")
                _, predicted = torch.max(outputs.data, 1)
                total += labels.size(0)
                correct += (predicted == labels).sum().item()
            # Evaluate after each epoch
            accuracy, test_loss = self.evaluate(self.model, test_loader, criterion)
            print(f'Epoch [{epoch + 1}/{self.epochs}], Test Accuracy: {accuracy:.2f}%, Test Loss: {test_loss:.4f}')

            # Keep the checkpoint with the best accuracy
            if accuracy > best_accuracy:
                best_accuracy = accuracy
                torch.save(self.model, self.compressed_model_path)
                print(f'New best model saved with accuracy: {best_accuracy:.2f}%')

        if isEvaluate:
            # Compare accuracy and size before/after compression
            return ModelCompressor(self.model_path, self.compressed_model_path, self.dataset, self.algorithm_id,
                            self.input_size, self.sample_ratio).evaluationModel()

    @staticmethod
    def convert_pth_to_onnx(pth_model_path, onnx_model_path, input_size):
        """Export a saved .pth model to ONNX at the given square input resolution."""
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        # map_location lets a CUDA-saved checkpoint load on a CPU-only host
        model = torch.load(pth_model_path, map_location=device)
        model.to(device)
        model.eval()
        input_shape = (3, input_size, input_size)
        # The dummy input must live on the same device as the model
        dummy_input = torch.randn(1, *input_shape).to(device)
        torch.onnx.export(model, dummy_input, onnx_model_path, opset_version=11)
        print(f"ONNX 模型已导出到 {onnx_model_path}")


# Offline quantization for classification models
class ModelQuantizer:
    """Offline (post-training) 8-bit quantization of a .pth classifier via MNN."""

    def __init__(self, model_path, compressed_model_path, config):
        config = json.loads(config) if isinstance(config, str) else config

        self.model_path = model_path  # path of the model before compression
        self.compressed_model_path = compressed_model_path  # where the compressed model is saved
        self.dataset = os.path.join(get_path(os.path.abspath(__file__), 2, get_config("model", "dataset_path")),
                                    str(config.get("dataset_id")))  # dataset directory
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

        # Parameters may come from config; otherwise defaults apply.
        self.algorithm_id = config.get("algorithm_id", 2)  # algorithm id (2 == offline quantization)
        self.batch_size = config.get("offline_quant_batch_size", 1)  # batch size > 1 may crash the MNN framework
        self.quant_bits = config.get("quant_bits", 8)  # quantize to 8 bits
        self.input_size = config.get("input_size", 224)  # image input size
        self.sample_ratio = config.get("sample_ratio", 1)  # use 1/N of the dataset for later evaluation

    @staticmethod
    def load_image(filename):
        """Load an image file as an RGB numpy array."""
        img = Image.open(filename).convert('RGB')
        return np.array(img)

    @staticmethod
    def resize_image(image, shape):
        """Resize a numpy image to the given target shape."""
        return np.array(Image.fromarray(image).resize(shape))

    # Calibration dataset used by MNN during quantization
    class CalibrationDataset(MNN.data.Dataset):
        def __init__(self, image_folder, num_images, input_size):
            # image_folder: directory of candidate images; num_images: random sample
            # size; input_size: square image input size.
            super().__init__()
            self.image_list = self._get_image_list(image_folder, num_images)
            self.input_size = input_size

        def _get_image_list(self, image_folder, num_images):
            # Collect every image file under the folder, then pick num_images at random.
            all_images = [os.path.join(root, file)
                          for root, _, files in os.walk(image_folder)
                          for file in files if
                          file.endswith(('.png', '.PNG', '.jpg', '.JPG', '.jpeg', '.bmp', '.BMP', '.JPEG'))]
            return random.sample(all_images, num_images)

        def __getitem__(self, index):
            # Load, center-crop, resize, normalize to [-1, 1] and wrap as an MNN expression.
            image_data = ModelQuantizer.load_image(self.image_list[index])
            image_data = self.center_crop(image_data, 0.875)
            image_data = ModelQuantizer.resize_image(image_data, (self.input_size, self.input_size))
            image_data = (image_data - 127.5) / 127.5
            dv = MNN.expr.const(image_data.flatten().tolist(), [self.input_size, self.input_size, 3],
                                MNN.expr.data_format.NHWC, MNN.expr.dtype.float)
            return [dv], []

        def __len__(self):
            # Number of calibration images
            return len(self.image_list)

        @staticmethod
        def center_crop(image_data, crop_factor):
            # Keep the central crop_factor fraction of the image in both dimensions.
            h, w, _ = image_data.shape
            h_size, w_size = int(h * crop_factor), int(w * crop_factor)
            h_start, w_start = (h - h_size) // 2, (w - w_size) // 2
            return image_data[h_start:h_start + h_size, w_start:w_start + w_size, :]

    # Data layout: N - Batch, C - Channel, H - Height, W - Width
    @staticmethod
    def get_mnn_format(fmt):
        """Map a layout name ('nchw'/'nhwc'/'nc4hw4') to the MNN format constant.

        Raises ValueError for an unknown layout name.
        """
        fmt_map = {'nchw': MNN.expr.NCHW, 'nhwc': MNN.expr.NHWC, 'nc4hw4': MNN.expr.NC4HW4}
        try:
            return fmt_map[fmt.lower()]
        except KeyError:
            # BUG FIX: the original *returned* a ValueError instance (via dict.get's
            # default) instead of raising it, so callers silently got an exception
            # object as the "format".
            raise ValueError("unknown format:", fmt) from None

    # Convert a model from .pth to onnx
    @staticmethod
    def convert_pth_to_onnx(pth_model_path, input_size):
        """Export a .pth model to a temporary ONNX file next to it; return its path."""
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        # map_location lets a CUDA-saved checkpoint load on a CPU-only host
        model = torch.load(pth_model_path, map_location=device)
        model.to(device)  # move the model to the chosen device

        model.eval()
        onnx_model_path = os.path.join(os.path.dirname(pth_model_path),
                                       f"tmp_{os.path.basename(pth_model_path).replace('.pth', '')}.onnx")
        input_shape = (3, input_size, input_size)

        dummy_input = torch.randn(1, *input_shape).to(device)  # dummy input on the same device
        torch.onnx.export(model, dummy_input, onnx_model_path, opset_version=11)
        print(f"ONNX 模型已导出到 {onnx_model_path}")

        return onnx_model_path

    # Convert a model from onnx to mnn
    @staticmethod
    def convert_onnx_to_mnn(onnx_model_path, mnn_model_path, biz_code="biz"):
        """Convert ONNX to MNN via the mnnconvert CLI, then delete the ONNX file."""
        # NOTE(review): the paths are interpolated into a shell command; they must
        # not contain spaces or shell metacharacters.
        os.system(
            f"mnnconvert --modelFile {onnx_model_path} --MNNModel {mnn_model_path} --framework ONNX --bizCode {biz_code}")
        print(f"MNN 模型已导出到 {mnn_model_path}")
        os.remove(onnx_model_path)

    @staticmethod
    def quant_func(net, dataloader, opt):
        """Drive quantization calibration by forwarding every batch through the net.

        The optimizer step passes a zero constant — a dummy update that only keeps
        the MNN optimizer state machine running.
        """
        net.train(True)
        dataloader.reset()

        for i in trange(dataloader.iter_number):  # walk through the dataloader
            example = dataloader.next()
            input_data = example[0]
            predicts = net.forward(input_data)  # forward pass
            # Dummy update, only to keep the optimizer running
            opt.step(F.const([0.0], []))
            for predict in predicts:
                predict.read()  # force evaluation of the (lazy) prediction

    def quantize_mnn_model(self, mnn_model_path, quant_imgs, quant_model_path, batch_size):
        """Quantize an MNN model to 8 bits using a calibration dataset and save it."""
        calibration_dataset = ModelQuantizer.CalibrationDataset(image_folder=quant_imgs, num_images=10,
                                                                input_size=self.input_size)
        dataloader = MNN.data.DataLoader(calibration_dataset, batch_size=batch_size, shuffle=True)
        m = F.load_as_dict(mnn_model_path)  # load the MNN model
        inputs_outputs = F.get_inputs_and_outputs(m)  # model inputs and outputs
        input_names = list(inputs_outputs[0].keys())
        output_names = list(inputs_outputs[1].keys())

        inputs = [m[input_names[0]]]  # first input
        outputs = [m[output_names[0]]]  # first output

        input_placeholders = []
        for i in range(len(inputs)):
            shape = [batch_size, 3, self.input_size, self.input_size]  # input shape
            fmt = 'nchw'  # input layout
            nnn_format = ModelQuantizer.get_mnn_format(fmt)
            placeholder = F.placeholder(shape, nnn_format)  # placeholder for the final save
            placeholder.name = 'input'
            input_placeholders.append(placeholder)

        net = MNN.nn.load_module(inputs, outputs, True)  # load as a trainable module

        opt = MNN.optim.SGD(net, 0.01, 0.9, 0.0005)  # optimizer (only dummy-stepped)
        MNN.nn.compress.train_quant(net, quant_bits=8)  # enable 8-bit quantization
        ModelQuantizer.quant_func(net, dataloader, opt)  # run the calibration pass

        net.train(False)  # leave training mode
        predicts = net.forward(input_placeholders)  # forward pass to build the graph
        MNN.expr.save(predicts, quant_model_path)  # save the quantized model

    def run(self, isEvaluate):
        """Run quantization in a child process; optionally evaluate afterwards.

        MNN.expr.save() may abort the process, so the work is isolated in a
        subprocess and evaluation happens from the parent after it exits.
        """
        if isEvaluate:  # compare before/after compression
            child_process = multiprocessing.Process(target=self.main)  # spawn the worker
            child_process.start()
            # (The original had an unreachable print after this return; removed.)
            return self.monitor_process(child_process)
        else:
            multiprocessing.Process(target=self.main).start()

    def monitor_process(self, process):
        """Wait for the worker and evaluate once it has exited.

        A non-zero exit code is the *expected* outcome here: MNN.expr.save() is
        known to abort the child process after writing the quantized model.
        """
        process.join()
        if process.exitcode != 0:
            return ModelCompressor(self.model_path, self.compressed_model_path, self.dataset, self.algorithm_id,
                                   self.input_size, self.sample_ratio).evaluationModel()

    def main(self):
        """pth -> onnx -> mnn conversion, then quantize the MNN model in place."""
        # MNN cannot quantize a .pth model directly; go through ONNX first.
        onnx_model_path = self.convert_pth_to_onnx(self.model_path, self.input_size)
        self.convert_onnx_to_mnn(onnx_model_path, self.compressed_model_path)
        return self.quantize_mnn_model(self.compressed_model_path, self.dataset, self.compressed_model_path,
                                       self.batch_size)


class ModelCompressor:
    """Compares accuracy and file size of a model before and after compression."""

    def __init__(self, model_path, compressed_model_path, dataset, algorithm_id, input_size, sample_ratio):
        self.algorithm_id = algorithm_id  # algorithm id, echoed in the result dict
        self.model_path = model_path  # original model path
        self.compressed_model_path = compressed_model_path  # compressed model path
        self.val_dir = dataset  # dataset directory used for validation
        self.input_size = input_size  # image input size
        self.batch_size = 1  # MNN inference may fail with batch size > 1
        # BUG FIX: the constructor previously hard-coded 10 here, silently
        # ignoring the caller-supplied sample_ratio (documented elsewhere as
        # "1 == use the full dataset").
        self.sample_ratio = sample_ratio
        self.val_loader, self.val_dataset = self.load_ImageNet(self.val_dir, self.batch_size)  # validation data
        print("start evaluation")

    def load_mnn_model(self, path):
        """Create an MNN interpreter/session/input tensor for the model at *path*."""
        normalized_path = os.path.normpath(path)
        interpreter = MNN.Interpreter(normalized_path)

        session = interpreter.createSession()
        input_tensor = interpreter.getSessionInput(session)
        return interpreter, session, input_tensor

    def load_pth_model(self, path):
        """Load a .pth model in eval mode (onto CPU; callers move it to a device)."""
        # map_location lets a CUDA-saved checkpoint load on a CPU-only host;
        # evaluate_pth_model moves it to the target device afterwards.
        model = torch.load(path, map_location="cpu")
        model.eval()
        return model

    @staticmethod
    def preprocess_image(image_path, input_size):
        """Resize an image and normalize it to [-1, 1] in NCHW layout with batch dim."""
        image = Image.open(image_path).convert('RGB').resize((input_size, input_size), Image.BILINEAR)
        image = np.array(image).astype(np.float32) / 255.0
        image = (image - 0.5) / 0.5
        return image.transpose((2, 0, 1))[np.newaxis, :]

    def evaluate_mnn_model(self, path, input_size, batch_size):
        """Return the accuracy of an MNN model over a random validation sample."""
        interpreter, session, input_tensor = self.load_mnn_model(path)
        correct, total = 0, 0

        # Sample 1/sample_ratio of the validation set. max(1, ...) guards against
        # a zero-size sample (and the division by zero it would cause below).
        sample_count = max(1, len(self.val_dataset.samples) // self.sample_ratio)
        sampled_images = random.sample(self.val_dataset.samples, sample_count)

        for img_path, label in sampled_images:
            img_data = self.preprocess_image(img_path, input_size)  # preprocess
            # MNN input tensor of shape (batch_size, 3, input_size, input_size)
            tmp_input = MNN.Tensor((batch_size, 3, input_size, input_size), MNN.Halide_Type_Float, img_data,
                                   MNN.Tensor_DimensionType_Caffe)
            input_tensor.copyFrom(tmp_input)  # copy the preprocessed data into the model input
            interpreter.runSession(session)  # forward pass
            output_tensor = interpreter.getSessionOutput(session)
            pred_label = np.argmax(np.array(output_tensor.getData()))  # predicted class

            total += 1
            if pred_label == label:
                correct += 1
        return correct / total  # accuracy on the sampled data

    def evaluate_pth_model(self, model, device):
        """Return the accuracy of a PyTorch model over the whole validation loader."""
        model.to(device)
        correct, total = 0, 0
        with torch.no_grad():
            for images, labels in self.val_loader:
                images, labels = images.to(device), labels.to(device)
                outputs = model(images)
                _, predicted = torch.max(outputs.data, 1)
                total += labels.size(0)
                correct += (predicted == labels).sum().item()

        return correct / total

    def load_ImageNet(self, val_dir, batch_size):
        """Build the validation DataLoader/dataset with ImageNet-style transforms."""
        val_dataset = datasets.ImageFolder(
            val_dir,
            transforms.Compose([
                transforms.Resize(int(self.input_size * 1.14)),
                transforms.CenterCrop(self.input_size),
                transforms.ToTensor(),
                transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
            ])
        )

        return torch.utils.data.DataLoader(val_dataset, batch_size=batch_size, shuffle=False, num_workers=5,
                                           pin_memory=True), val_dataset

    def _model_accuracy(self, path):
        """Evaluate accuracy for the model at *path*, dispatching on its extension.

        Raises ValueError for anything other than .pth or .mnn.
        """
        if path.endswith('.pth'):
            device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
            return self.evaluate_pth_model(self.load_pth_model(path), device)
        if path.endswith('.mnn'):
            return self.evaluate_mnn_model(path, self.input_size, self.batch_size)
        raise ValueError("Unsupported model format!")

    def evaluationModel(self):
        """Evaluate both models and return the before/after comparison dict."""
        # Accuracy before and after compression (dispatch was previously
        # copy-pasted per model; now shared via _model_accuracy).
        pre_compression_acc = self._model_accuracy(self.model_path)
        compressed_acc = self._model_accuracy(self.compressed_model_path)

        # File sizes in MB, before and after compression
        pre_compression_size = os.path.getsize(self.model_path) / (1024 * 1024)
        compressed_size = os.path.getsize(self.compressed_model_path) / (1024 * 1024)
        # Size ratio after/before
        compression_ratio = compressed_size / pre_compression_size

        compress_result = {
            "compress_algorithm_id": f"{self.algorithm_id}",
            "pre-compression_acc": f"{pre_compression_acc:.2f}",
            "compressed_acc": f"{compressed_acc:.2f}",
            "pre-compression_size": f"{pre_compression_size:.2f}MB",
            "compressed_size": f"{compressed_size:.2f}MB",
            "compression_ratio": f"{1-compression_ratio:.2f}"
        }
        print(compress_result)
        return compress_result
