import os
import pdb
import warnings
import multiprocessing as mp
# CUDA contexts cannot survive fork(); force 'spawn' so each worker starts
# from a fresh interpreter and only initializes CUDA after pinning its GPU
# (see process_gpu_task). force=True overrides any earlier setting.
mp.set_start_method('spawn', force=True)
import glob
from tqdm import tqdm
from typing import cast

import numpy as np
import SimpleITK as sitk



# Silence pydicom UserWarnings — presumably emitted during DICOM reading by
# the imaging dependencies (pydicom is not imported directly here); confirm.
warnings.filterwarnings("ignore", module="pydicom", category=UserWarning)


def process_gpu_task(gpu_id: int, file_list: "list[str]", args: "argparse.Namespace"):
    """Worker entry point: run segmentation inference for `file_list` on one GPU.

    Pins this process to `gpu_id` via CUDA_VISIBLE_DEVICES *before* importing
    torch, so the worker only ever sees its assigned device (as device 0).
    Predictions are written to `args.output`, one file per input, named by
    the input's basename. Per-file failures are logged and skipped.
    """
    # Must happen before `import torch` initializes CUDA — do not reorder.
    os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
    # Heavy imports are deferred into the worker: under 'spawn' each process
    # re-imports this module, and torch must load after the GPU pinning above.
    import torch
    from itkit.mm.mgam_models import mgam_Seg3D_Lite
    from itkit.mm.inference import SegInferencer
    print(f"Process {os.getpid()} using GPU {gpu_id}, processing {len(file_list)} files")
    
    class Inferencer_Seg3D(SegInferencer):
        """SegInferencer specialized with CT intensity windowing preprocessing."""

        def __init__(self, ww:int, wl:int, *args, **kwargs):
            super().__init__(*args, **kwargs)
            self.ww = ww # window width
            self.wl = wl # window level
            self.model: mgam_Seg3D_Lite # type: ignore

        def _preprocess(self, image_array:np.ndarray) -> np.ndarray:
            """Clip to the window [wl - ww/2, wl + ww/2] and rescale to [0, 1]."""
            left = self.wl - self.ww/2
            right = self.wl + self.ww/2
            # NOTE(review): astype(np.int16) assumes intensities fit int16
            # (typical for CT Hounsfield units) — confirm for other modalities.
            image_array = np.clip(image_array.astype(np.int16), left, right)
            image_array = (image_array - left) / self.ww
            return image_array

        @torch.inference_mode()
        def Inference_FromNDArray(self, image_array:np.ndarray) -> np.ndarray:
            """Window, batch, and sliding-window-infer one volume.

            :param image_array: raw volume, shape [Z, Y, X].
            :return: per-class logits as a numpy array, shape [C, Z, Y, X].
            """
            assert image_array.ndim == 3, "Input image must be (Z, Y, X), got: {}.".format(image_array.shape)
            inputs = self._preprocess(image_array) # [Z, Y, X]
            inputs = inputs.astype(np.float16 if self.fp16 else np.float32)
            # Prepend batch and channel axes: [Z,Y,X] -> [1,1,Z,Y,X].
            inputs = torch.from_numpy(inputs[None,None]).cuda()
            with torch.autocast('cuda'):
                logits = self.model.slide_inference(inputs)[0] # [N,C,Z,Y,X] -> [C,Z,Y,X]
            return logits.cpu().numpy() # [C, Z, Y, X]

    inferencer = Inferencer_Seg3D(
        ww=args.ww,
        wl=args.wl,
        cfg_path=args.cfg_path,
        ckpt_path=args.ckpt_path,
        fp16=args.fp16,
        allow_tqdm=True
    )

    os.makedirs(args.output, exist_ok=True)
    # position=gpu_id keeps one non-clobbering tqdm row per worker process.
    for file_path in tqdm(file_list,
                          dynamic_ncols=True,
                          leave=False,
                          mininterval=1,
                          position=gpu_id,
                          desc=f"GPU {gpu_id} Processing"):
        try:
            seriesUID = os.path.basename(file_path)
            output_path = os.path.join(args.output, seriesUID)
            # Resumable runs: skip volumes that already have a prediction file.
            if os.path.exists(output_path):
                print(f"File {output_path} already exists, skipping.")
                continue
            
            itk_image = sitk.ReadImage(file_path)
            image_array = sitk.GetArrayFromImage(itk_image)
            pred_logits = inferencer.Inference_FromNDArray(image_array)
            pred_sem_seg = pred_logits.argmax(axis=0).astype(np.uint8) # [Class, Z, Y, X] -> [Z, Y, X]
            itk_pred = sitk.GetImageFromArray(pred_sem_seg)
            # Carry over spacing/origin/direction from the source volume.
            itk_pred.CopyInformation(itk_image)
            sitk.WriteImage(itk_pred, output_path, True) # True -> write compressed
        
        except Exception as e:
            # Best-effort batch processing: report the failure, keep going.
            import traceback
            traceback.print_exc()
            print(f"Error processing {file_path}: {e}")


def parse_args():
    """Build the CLI parser for the inferencer and parse sys.argv."""
    import argparse

    parser = argparse.ArgumentParser(description='Inferencer')

    # Positional arguments: (name, help)
    for arg_name, arg_help in (
        ('input_root', 'input folder path'),
        ('output', 'Output folder path'),
    ):
        parser.add_argument(arg_name, type=str, help=arg_help)

    # Optional path overrides with project defaults.
    parser.add_argument(
        '--cfg-path', type=str,
        default="/home/pc_stu4/liver-segmentation/Dr_Peng/configs/1.0.1.result_1/SegFormer3D.py",
        help='Config file path')
    parser.add_argument(
        '--ckpt-path', type=str,
        default="/home/pc_stu4/liver-segmentation/Dr_Peng/work_dirs/1.0.1.result_1/SegFormer3D/best_Perf_mDice_iter_100000.pth",
        help='Checkpoint file path')

    # Integer options: (flag, default, help)
    for flag, default, flag_help in (
        ('--gpus', 4, 'Number of GPUs to use'),
        ('--ww', 300, 'Window width'),
        ('--wl', 50, 'Window level'),
    ):
        parser.add_argument(flag, type=int, default=default, help=flag_help)

    parser.add_argument('--fp16', action='store_true', default=False, help='Use FP16 precision')
    return parser.parse_args()

def main():
    """Discover input volumes and fan inference out across GPUs.

    Files are split into contiguous, near-equal chunks, one per GPU; each
    non-empty chunk is handed to a 'spawn' worker process pinned to its GPU.

    Raises:
        ValueError: if no supported files are found under ``args.input_root``.
    """
    args = parse_args()

    # Collect inputs with the supported medical-image extensions.
    input_files = []
    for ext in ['*.mha', '*.nii', '*.nii.gz', '*.dcm']:
        input_files.extend(glob.glob(os.path.join(args.input_root, ext)))

    if not input_files:
        raise ValueError(f"No input files found in {args.input_root}")

    print(f"Found {len(input_files)} files to process")

    # Single GPU or single file: run in-process, no workers needed.
    if args.gpus == 1 or len(input_files) == 1:
        os.environ["CUDA_VISIBLE_DEVICES"] = "0"
        process_gpu_task(0, input_files, args)
        return

    # Split into contiguous chunks; the first `remainder` chunks get one
    # extra file each so the sizes differ by at most one.
    files_per_gpu = []
    avg_files = len(input_files) // args.gpus
    remainder = len(input_files) % args.gpus
    start_idx = 0
    for i in range(args.gpus):
        num_files = avg_files + (1 if i < remainder else 0)
        files_per_gpu.append(input_files[start_idx:start_idx + num_files])
        start_idx += num_files

    # Launch one worker per GPU. Skip empty chunks (possible when
    # gpus > number of files) so we never spawn a CUDA process and pay the
    # model-loading cost for a worker with nothing to do.
    processes = []
    for gpu_id, chunk in enumerate(files_per_gpu):
        if not chunk:
            continue
        p = mp.Process(target=process_gpu_task, args=(gpu_id, chunk, args))
        p.start()
        processes.append(p)

    # Wait for all workers to finish.
    for p in processes:
        p.join()



# The guard is mandatory under the 'spawn' start method: worker processes
# re-import this module and must not re-run main().
if __name__ == '__main__':
    main()
