import onnxruntime as ort
import MNN
import torchvision.transforms as transforms
import time
import GPUtil
import psutil
import os
from datetime import datetime
import csv
from torch.utils.data import DataLoader
from torchvision.datasets import ImageFolder
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from Util.Return import *
from torchvision import datasets
import torch
from PIL import Image


class Appraiser:
    """Evaluate an ONNX or MNN image-classification model on an ImageFolder dataset.

    For every image it records the prediction, inference time and CPU/GPU memory
    usage to a per-model CSV, renders a memory-over-time plot, and finally returns
    a summary dict (accuracy plus average resource usage).
    """

    def __init__(self, model_input, test_output, json, model_id):
        """
        Args:
            model_input: Path to the model file; extension selects the backend
                (``.onnx`` or ``.mnn``).
            test_output: Directory where the CSV and PNG outputs are written.
            json: Request dict; keys used: ``dataset_id``, ``device`` (default
                'cpu'), ``input_size`` (default 224).
            model_id: Identifier used to name the output files.

        Raises:
            FileNotFoundError: If the resolved dataset path does not exist.
            ValueError: If the model extension is neither ``.onnx`` nor ``.mnn``.
        """
        self.model_input = model_input
        self.dataset_id = str(json.get('dataset_id', ''))
        self.device = json.get('device', 'cpu')
        self.input_size = json.get('input_size', 224)
        # Dataset root resolved via project helpers (get_path/get_config come
        # from Util.Return — presumably walks up 2 levels from this file;
        # NOTE(review): confirm against Util.Return.
        self.dataset_path = os.path.join(get_path(os.path.abspath(__file__), 2, get_config("model", "dataset_path")),
                                         self.dataset_id)
        self.model_id = model_id
        self.output_dir = test_output
        self.output_file = os.path.join(self.output_dir, f'{self.model_id}.csv')  # CSV named after model_id

        if not os.path.exists(self.dataset_path):
            raise FileNotFoundError(f"数据集路径未找到: {self.dataset_path}")

        # Rolling per-batch samples used by the final plot and summary.
        self.time_stamps = []
        self.cpu_memory_usages = []
        self.gpu_memory_usages = []
        self.inference_results = []
        self.inference_times = []

        self.correct_predictions = 0
        self.total_predictions = 0

        self.start_time = datetime.now()

        self.model_format = os.path.splitext(model_input)[-1].lower()
        if self.model_format == '.onnx':
            self.providers = ['CPUExecutionProvider']
            if self.device == 'gpu' and ort.get_device() == 'GPU':
                self.providers = ['CUDAExecutionProvider']
            self.ort_session = ort.InferenceSession(self.model_input, providers=self.providers)

        elif self.model_format == '.mnn':
            # Load once; update_data() reuses this session instead of
            # re-creating the interpreter for every batch.
            self.mnn_interpreter, self.mnn_session, self.mnn_input_tensor = load_model(self.model_input)

        else:
            raise ValueError(f"Unsupported model format: {self.model_format}")

        # Fix: honor the configured input_size instead of a hard-coded 224.
        # The default is still 224, so existing callers see no change.
        transform = transforms.Compose([
            transforms.Resize((self.input_size, self.input_size)),
            transforms.ToTensor()
        ])

        self.dataset = ImageFolder(self.dataset_path, transform=transform)
        self.dataloader = DataLoader(self.dataset, batch_size=1, shuffle=False)

        self.current_epoch = 0
        self.max_epochs = 1

    def start(self):
        """Run all evaluation epochs, write the CSV/plot, and return the summary dict."""
        print(f"Number of classes in dataset: {len(self.dataset.classes)}")
        print(f"Classes loaded: {self.dataset.classes}")

        while self.current_epoch < self.max_epochs:
            print(f"Starting epoch {self.current_epoch + 1}...")
            self.correct_predictions = 0
            self.total_predictions = 0

            # First epoch overwrites the CSV (with header); later epochs append.
            mode = 'w' if self.current_epoch == 0 else 'a'
            write_header = self.current_epoch == 0

            with open(self.output_file, mode, newline='') as f:
                writer = csv.writer(f)
                if write_header:
                    writer.writerow(
                        ["时间", "CPU内存使用(GB)", "GPU内存使用(MB)", "推理时间(s)", "推理轮数", "原始图片名", "标签",
                         "预测结果"])

                for batch_idx, (inputs, labels) in enumerate(self.dataloader):
                    print(f"Processing batch {batch_idx + 1} with {len(inputs)} images.")
                    self.update_data(inputs, labels, batch_idx, writer)

            accuracy = self.compute_accuracy()
            print(f"Epoch {self.current_epoch + 1}: Total images: {self.total_predictions}")
            print(f"Epoch {self.current_epoch + 1}: Correctly predicted images: {self.correct_predictions}")
            print(f"Epoch {self.current_epoch + 1}: Accuracy: {accuracy:.2f}%")

            self.current_epoch += 1

        self.plot_final_global_graph()

        # Compute and return the aggregated result.
        return self.generate_evaluation_result()

    def update_data(self, inputs, labels, batch_idx, writer):
        """Run inference on one batch, update accuracy counters, and log one CSV
        row plus resource/time samples per image.

        Args:
            inputs: Batch tensor from the DataLoader (NCHW float).
            labels: Ground-truth class indices for the batch.
            batch_idx: 0-based batch index, used to locate sample file paths.
            writer: ``csv.writer`` receiving the per-image rows.
        """
        start_time = time.time()

        if self.model_format == '.onnx':
            inputs = inputs.numpy()
            ort_inputs = {self.ort_session.get_inputs()[0].name: inputs}
            ort_outs = self.ort_session.run(None, ort_inputs)
            predictions = np.argmax(ort_outs[0], axis=1)

            detailed_predictions = []
            batch_start = batch_idx * len(inputs)
            for i, pred_index in enumerate(predictions):
                img_path, _ = self.dataloader.dataset.samples[batch_start + i]
                detailed_predictions.append({
                    'image': os.path.basename(img_path),
                    'label': labels[i].item(),
                    # Cast numpy scalar so downstream formatting stays clean.
                    'prediction': int(pred_index)
                })

        elif self.model_format == '.mnn':
            # Fix: reuse the session created in __init__ instead of calling
            # evaluate_model(), which re-loaded the whole model on every batch.
            batch_size = inputs.size(0)
            detailed_predictions = []
            for i in range(batch_size):
                img_path, _ = self.dataset.samples[batch_idx * batch_size + i]
                img_data = preprocess_image(img_path, self.input_size)
                tmp_input = MNN.Tensor((1, 3, self.input_size, self.input_size),
                                       MNN.Halide_Type_Float, img_data,
                                       MNN.Tensor_DimensionType_Caffe)
                self.mnn_input_tensor.copyFrom(tmp_input)
                self.mnn_interpreter.runSession(self.mnn_session)
                output_tensor = self.mnn_interpreter.getSessionOutput(self.mnn_session)
                output_data = np.array(output_tensor.getData())
                detailed_predictions.append({
                    'image': os.path.basename(img_path),
                    'label': labels[i].item(),
                    'prediction': int(np.argmax(output_data))
                })

        else:
            raise ValueError(f"Unsupported model format: {self.model_format}")

        end_time = time.time()

        # Fix: sample once up front so the per-batch appends after the loop can
        # never hit an undefined name when a batch yields no predictions.
        cpu_memory_used = get_cpu_usage_and_memory()
        gpu_memory_used, _ = get_gpu_usage()  # may be None when no GPU is present

        for result in detailed_predictions:
            img_path = result['image']
            pred_index = result['prediction']
            label_index = result['label']

            self.total_predictions += 1
            if pred_index == label_index:
                self.correct_predictions += 1

            label_name = self.dataset.classes[label_index]
            label_formatted = f"{label_name}（{label_index}）"

            # Guard against out-of-range predictions (e.g. a model with more
            # output classes than the dataset has folders).
            prediction_name = self.dataset.classes[pred_index] if pred_index < len(
                self.dataset.classes) else "Invalid Index"
            prediction_formatted = f"{prediction_name}（{pred_index}）"

            result_entry = {
                'epoch': self.current_epoch + 1,
                'image': img_path,
                'label': label_formatted,
                'prediction': prediction_formatted
            }
            self.inference_results.append(result_entry)

            # Re-sample per row so the CSV tracks usage over time.
            cpu_memory_used = get_cpu_usage_and_memory()
            gpu_memory_used, _ = get_gpu_usage()

            writer.writerow([
                datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
                cpu_memory_used,
                gpu_memory_used,
                end_time - start_time,
                self.current_epoch + 1,
                result_entry['image'],
                result_entry['label'],
                result_entry['prediction']
            ])

        elapsed_time = datetime.now() - self.start_time
        current_time = self.start_time + elapsed_time

        # One plot/summary sample per batch (last row's values represent the batch).
        self.time_stamps.append(current_time)
        self.cpu_memory_usages.append(cpu_memory_used)
        self.gpu_memory_usages.append(gpu_memory_used)
        self.inference_times.append(end_time - start_time)

    def compute_accuracy(self):
        """Return the accuracy so far as a percentage (0.0 when nothing was predicted)."""
        if self.total_predictions == 0:
            return 0.0
        return (self.correct_predictions / self.total_predictions) * 100

    def plot_final_global_graph(self):
        """Save a two-panel CPU/GPU memory-over-time plot next to the CSV (same stem, .png)."""
        plt.figure(figsize=(12, 8))

        plt.subplot(2, 1, 1)
        plt.plot(self.time_stamps, self.cpu_memory_usages, color='b', label='CPU Memory Usage (GB)')
        plt.xlabel('Time')
        plt.ylabel('CPU Memory Usage (GB)')
        plt.title('CPU Memory Usage Over Time')
        plt.legend()

        plt.subplot(2, 1, 2)
        plt.plot(self.time_stamps, self.gpu_memory_usages, color='r', label='GPU Memory Usage (MB)')
        plt.xlabel('Time')
        plt.ylabel('GPU Memory Usage (MB)')
        plt.title('GPU Memory Usage Over Time')
        plt.legend()

        # Shared time-axis formatting for both subplots.
        for ax in plt.gcf().axes:
            ax.xaxis.set_major_formatter(mdates.DateFormatter('%H:%M'))
            ax.xaxis.set_major_locator(mdates.MinuteLocator(interval=1))
            ax.xaxis.set_minor_locator(mdates.SecondLocator(interval=10))
            plt.setp(ax.xaxis.get_majorticklabels(), rotation=45, ha='right')

        plt.tight_layout()
        plt.savefig(self.output_file.replace('.csv', '.png'))  # image named after model_id
        print(f"Final image saved to {self.output_file.replace('.csv', '.png')}")

    def generate_evaluation_result(self):
        """Build the summary dict returned to the caller.

        Note: ``test_image`` is the PNG file *name* (not a full path); callers
        are expected to join it with the output directory themselves.
        """
        # Average CPU (GB) and GPU (MB) usage over all recorded samples.
        avg_cpu_memory_usage = np.mean(self.cpu_memory_usages) if self.cpu_memory_usages else 0
        avg_gpu_memory_usage = np.mean(self.gpu_memory_usages) if self.gpu_memory_usages else 0
        # NOTE(review): this sums GB + MB per sample, mixing units — kept for
        # backward compatibility, but the value should be treated as indicative only.
        avg_memory_usage = np.mean([cpu + gpu for cpu, gpu in zip(self.cpu_memory_usages, self.gpu_memory_usages)]) if self.cpu_memory_usages and self.gpu_memory_usages else 0

        evaluation_result = {
            "test_image": f"{self.model_id}.png",
            "acc_value": f"{self.compute_accuracy():.2f}%",
            "cpu_occupancy": f"{avg_cpu_memory_usage:.2f}",
            "gpu_occupancy": f"{avg_gpu_memory_usage:.2f} MB",
            "memory_occupancy": f"{avg_memory_usage:.2f} MB"
        }

        return evaluation_result


# 辅助函数
def get_gpu_usage():
    """Return (memory_used_MB, memory_total_MB) for the first visible GPU.

    Returns (None, None) when GPUtil reports no GPUs.
    """
    available = GPUtil.getGPUs()
    if not available:
        return None, None
    first = available[0]
    return first.memoryUsed, first.memoryTotal


def get_cpu_usage_and_memory():
    """Return the resident-set size (RSS) of the current process, in GiB."""
    rss_bytes = psutil.Process(os.getpid()).memory_info().rss
    return rss_bytes / (1024 ** 3)


# 加载MNN模型
def load_model(model_path):
    """Load an MNN model and return (interpreter, session, input_tensor)."""
    net = MNN.Interpreter(model_path)
    sess = net.createSession()
    return net, sess, net.getSessionInput(sess)


# 预处理图像
def preprocess_image(image_path, input_size):
    """Load an image and prepare it for MNN inference.

    Opens the file as RGB, bilinearly resizes to (input_size, input_size),
    scales to [0, 1], normalizes to [-1, 1], and returns a float32 array of
    shape (1, 3, input_size, input_size) (NCHW).
    """
    img = Image.open(image_path).convert('RGB')
    img = img.resize((input_size, input_size), Image.BILINEAR)
    arr = np.asarray(img, dtype=np.float32) / 255.0
    arr = (arr - 0.5) / 0.5            # [0, 1] -> [-1, 1]
    arr = np.transpose(arr, (2, 0, 1))  # HWC -> CHW
    return arr[np.newaxis, :]


# 评估模型准确性
def evaluate_model(model_path, val_dataset_subset, input_size, batch_size=1):
    """Run an MNN model over (image_path, label) pairs and collect predictions.

    Args:
        model_path: Path to the .mnn model file.
        val_dataset_subset: Iterable of (image_path, label) tuples.
        input_size: Square input resolution fed to the network.
        batch_size: Unused here; inference is performed one image at a time.

    Returns:
        A list of dicts with keys 'image' (file name), 'label' (ground truth)
        and 'prediction' (argmax class index).
    """
    interpreter, session, input_tensor = load_model(model_path)
    results = []
    input_shape = (1, 3, input_size, input_size)

    for image_path, true_label in val_dataset_subset:
        staging = MNN.Tensor(input_shape, MNN.Halide_Type_Float,
                             preprocess_image(image_path, input_size),
                             MNN.Tensor_DimensionType_Caffe)
        input_tensor.copyFrom(staging)
        interpreter.runSession(session)
        scores = np.array(interpreter.getSessionOutput(session).getData())
        results.append({
            'image': os.path.basename(image_path),
            'label': true_label,
            'prediction': np.argmax(scores),
        })

    return results


def load_ImageNet(val_dir, batch_size=1):
    """Build the standard ImageNet validation pipeline for a directory.

    Applies Resize(256) -> CenterCrop(224) -> ToTensor -> ImageNet mean/std
    normalization, and wraps the dataset in a non-shuffling DataLoader.

    Returns:
        (val_loader, val_dataset) tuple.
    """
    preprocessing = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225]),
    ])

    val_dataset = datasets.ImageFolder(val_dir, preprocessing)

    val_loader = torch.utils.data.DataLoader(val_dataset,
                                             batch_size=batch_size,
                                             shuffle=False,
                                             num_workers=5,
                                             pin_memory=True)
    return val_loader, val_dataset
