import json
import torch
from torchvision import transforms
from PIL import Image
from ExtendScripts.DicomAutoConvert.DicomQuality.model import resnet34  # 确保你定义的模型在 model.py 里
import os
import numpy as np
import nrrd
import matplotlib.pyplot as plt
import csv

def get_data(file_path):
    """Read an NRRD file and return only its voxel array (header discarded)."""
    volume, _header = nrrd.read(file_path)
    return volume

def pad_or_crop_depth(data, target_depth=100):
    """Force the depth (third axis) of a 3-D volume to exactly target_depth.

    Shallower volumes are zero-padded at the far end; deeper volumes are
    cropped to a centered window of target_depth slices.
    """
    depth = data.shape[2]
    if depth < target_depth:
        # Pad only at the end of the depth axis with zeros.
        shortfall = target_depth - depth
        return np.pad(data, ((0, 0), (0, 0), (0, shortfall)),
                      mode='constant', constant_values=0)
    if depth > target_depth:
        # Take a centered window, clamped so the slice stays in bounds.
        begin = max(depth // 2 - target_depth // 2, 0)
        stop = begin + target_depth
        if stop > depth:
            stop = depth
            begin = stop - target_depth
        return data[:, :, begin:stop]
    return data

def max_intensity_projection(data):
    """Collapse axis 1 of the volume by taking the element-wise maximum."""
    return data.max(axis=1)

def resize_mip_image(mip, target_shape=(512, 512)):
    """Center-pad (with zeros) and then center-crop a 2-D image to target_shape."""
    target_h, target_w = target_shape
    rows, cols = mip.shape

    # Grow symmetrically with zeros wherever the image is too small.
    grow_h = max(target_h - rows, 0)
    grow_w = max(target_w - cols, 0)
    if grow_h or grow_w:
        mip = np.pad(
            mip,
            ((grow_h // 2, grow_h - grow_h // 2),
             (grow_w // 2, grow_w - grow_w // 2)),
            mode='constant', constant_values=0,
        )
        rows, cols = mip.shape

    # Cut a centered window of exactly the target size.
    top = (rows - target_h) // 2
    left = (cols - target_w) // 2
    return mip[top:top + target_h, left:left + target_w]

def process_single_file(nrrd_file_path, output_png_path, target_depth=100):
    """Convert one NRRD volume into a 512x512 grayscale MIP saved as PNG.

    Errors are reported and swallowed so a batch run can continue past a
    single bad file.
    """
    try:
        volume = pad_or_crop_depth(get_data(nrrd_file_path), target_depth)
        projection = resize_mip_image(max_intensity_projection(volume),
                                      target_shape=(512, 512))
        plt.imsave(output_png_path, projection, cmap='gray')
    except Exception as exc:
        print(f"❌ Error processing {nrrd_file_path}: {exc}")

def process_batch_nrrd(input_dir, output_dir, target_depth=100):
    """Run process_single_file over every .nrrd file found in input_dir.

    PNGs land in output_dir (created if missing), named after the source file.
    """
    os.makedirs(output_dir, exist_ok=True)
    for entry in os.listdir(input_dir):
        if not entry.lower().endswith('.nrrd'):
            continue
        stem, _ext = os.path.splitext(entry)
        process_single_file(
            os.path.join(input_dir, entry),
            os.path.join(output_dir, stem + ".png"),
            target_depth,
        )



def load_model(device, weights_path, num_classes=5):
    """Build a ResNet-34 classifier, load its weights, and switch to eval mode."""
    net = resnet34(num_classes=num_classes).to(device)
    state_dict = torch.load(weights_path, map_location=device)
    net.load_state_dict(state_dict)
    net.eval()
    return net


def preprocess_image(img_path, transform):
    """Load an image as RGB, apply *transform*, and add a batch dimension.

    Fix: the original left the file handle from Image.open open; the
    context manager guarantees it is closed once the pixel data is loaded.
    """
    with Image.open(img_path) as handle:
        img = handle.convert("RGB")  # convert() forces a full load before close
    img = transform(img)
    return torch.unsqueeze(img, dim=0)  # prepend the batch dimension


def batch_predict_image_class(img_dir: str, output_csv: str):
    """Classify every image in *img_dir* and write the results to *output_csv*.

    Requires './class_indices.json' (index -> class name) and './resNet34.pth'
    (model weights) in the working directory; raises FileNotFoundError when
    either is missing. Each output row is (ImageName, PredictedClass), with
    "Error" recorded for files that fail to process.

    Fixes vs. the original:
    - the progress/error prints emitted a literal placeholder instead of the
      actual filename;
    - runtime file checks used `assert`, which is stripped under `python -O`.
    """
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    transform = transforms.Compose([
        transforms.ToTensor()
    ])

    # Load the class-index -> label mapping.
    json_path = './class_indices.json'
    if not os.path.exists(json_path):
        raise FileNotFoundError(f"file: '{json_path}' does not exist.")
    with open(json_path, "r") as f:
        class_indict = json.load(f)

    # Load the trained model.
    weights_path = "./resNet34.pth"
    if not os.path.exists(weights_path):
        raise FileNotFoundError(f"file: '{weights_path}' does not exist.")
    model = load_model(device, weights_path, num_classes=len(class_indict))

    # Batch prediction.
    results = []
    image_exts = ('.jpg', '.jpeg', '.png', '.bmp', '.tif', '.tiff')
    for filename in os.listdir(img_dir):
        if not filename.lower().endswith(image_exts):
            continue
        img_path = os.path.join(img_dir, filename)
        try:
            img = preprocess_image(img_path, transform).to(device)
            with torch.no_grad():
                output = torch.squeeze(model(img)).cpu()
            predict = torch.softmax(output, dim=0)
            predict_cla = torch.argmax(predict).item()
            predicted_class = class_indict[str(predict_cla)]
            results.append((filename, predicted_class))
            print(f"{filename} -> {predicted_class}")
        except Exception as e:
            print(f"❌ Error processing {filename}: {e}")
            results.append((filename, "Error"))

    # Write the CSV report.
    with open(output_csv, mode='w', newline='', encoding='utf-8') as f:
        writer = csv.writer(f)
        writer.writerow(["ImageName", "PredictedClass"])
        writer.writerows(results)

    print(f"\n✅ Prediction results saved to: {output_csv}")


if __name__ == '__main__':
    # Locations for raw NRRD volumes, intermediate PNGs, and the CSV report.
    raw_dir = '/mnt/826dce2d-17d8-4c44-9d9e-601a2b3b83c5/qualityRawData/Raw'
    mip_dir = '/mnt/826dce2d-17d8-4c44-9d9e-601a2b3b83c5/qualityRawData/png'
    report_csv = '/mnt/826dce2d-17d8-4c44-9d9e-601a2b3b83c5/qualityRawData/predict_csv.csv'

    os.makedirs(mip_dir, exist_ok=True)
    # Step 1: NRRD volumes -> 512x512 MIP PNGs.
    process_batch_nrrd(raw_dir, mip_dir, target_depth=100)
    # Step 2: classify the PNGs and save the predictions.
    batch_predict_image_class(mip_dir, report_csv)



