| | |
"""
NeuroScan AI concurrency stress test.

Measures peak CPU / GPU usage while running the analysis pipeline,
with support for 2-3 concurrent tasks.
"""
| |
|
| | import os |
| | import sys |
| | import time |
| | import threading |
| | import multiprocessing |
| | from concurrent.futures import ThreadPoolExecutor, ProcessPoolExecutor, as_completed |
| | from pathlib import Path |
| | import psutil |
| | import numpy as np |
| |
|
| | |
| | sys.path.insert(0, str(Path(__file__).parent.parent)) |
| |
|
| | |
# Shared sample buffers filled by the resource_monitor background thread.
# Each key maps to a list with one sample appended per polling interval.
monitor_data = {
    "cpu_percent": [],     # system-wide CPU utilisation (%)
    "memory_percent": [],  # system memory utilisation (%)
    "memory_gb": [],       # used system memory (GiB)
    "gpu_memory_gb": [],   # GPU memory in use (GiB), 0 when no GPU
    "gpu_util": []         # GPU utilisation (%), 0 when no GPU
}
# Flag polled by resource_monitor; set True to stop the monitor thread.
stop_monitor = False
| |
|
| |
|
def get_gpu_stats():
    """Return (gpu_memory_gb, gpu_utilization_percent) for GPU 0.

    Prefers `nvidia-smi` for a device-wide view; falls back to
    ``torch.cuda.memory_allocated()`` (this process only) with
    utilization reported as 0.  Returns (0, 0) whenever torch, CUDA,
    or nvidia-smi is unavailable — stats are best-effort and must
    never crash the monitoring thread.
    """
    try:
        import torch
        if not torch.cuda.is_available():
            return 0, 0

        # Fallback figure: memory allocated by this process, in GiB.
        allocated = torch.cuda.memory_allocated() / (1024**3)

        import subprocess
        try:
            # FIX: add a timeout so a wedged nvidia-smi cannot stall the
            # 0.2s-interval monitor loop indefinitely.
            result = subprocess.run(
                ['nvidia-smi', '--query-gpu=memory.used,utilization.gpu',
                 '--format=csv,noheader,nounits', '-i', '0'],
                capture_output=True, text=True, timeout=5
            )
        except (OSError, subprocess.TimeoutExpired):
            return allocated, 0

        if result.returncode == 0:
            parts = result.stdout.strip().split(',')
            mem_used = float(parts[0]) / 1024  # nvidia-smi reports MiB -> GiB
            gpu_util = float(parts[1])
            return mem_used, gpu_util
        return allocated, 0
    except Exception:
        # FIX: was a bare `except:` which also swallowed SystemExit /
        # KeyboardInterrupt; Exception is the right net for best-effort stats.
        return 0, 0
| |
|
| |
|
def resource_monitor(interval=0.5):
    """Background-thread body: sample system resources until told to stop.

    Appends one sample per tick to every list in the module-level
    ``monitor_data`` dict; exits when ``stop_monitor`` becomes True.
    """
    global stop_monitor, monitor_data

    while not stop_monitor:
        # CPU utilisation since the previous call (non-blocking sample).
        monitor_data["cpu_percent"].append(psutil.cpu_percent(interval=None))

        vm = psutil.virtual_memory()
        monitor_data["memory_percent"].append(vm.percent)
        monitor_data["memory_gb"].append(vm.used / (1024**3))

        # GPU figures are best-effort; (0, 0) when no GPU is present.
        gpu_mem, gpu_util = get_gpu_stats()
        monitor_data["gpu_memory_gb"].append(gpu_mem)
        monitor_data["gpu_util"].append(gpu_util)

        time.sleep(interval)
| |
|
| |
|
def run_single_pipeline(task_id, data_pair):
    """Run one full analysis pipeline: load -> register -> detect changes.

    Args:
        task_id: Identifier used in log output and the result dict.
        data_pair: (baseline_path, followup_path) tuple of NIfTI files.

    Returns:
        On success: dict with per-stage timings and the data shape.
        On failure: dict with status "error" and the exception message.
    """
    baseline_path, followup_path = data_pair

    print(f" ๐ ไปปๅก {task_id}: ๅผๅงๅค็ {Path(baseline_path).parent.name}")
    started = time.time()

    try:
        from app.services.dicom import DicomLoader
        from app.services.registration import ImageRegistrator
        from app.services.analysis import ChangeDetector

        loader = DicomLoader()
        registrator = ImageRegistrator()
        detector = ChangeDetector()

        # Stage 1: load both volumes.
        stage_start = time.time()
        baseline_data, _ = loader.load_nifti(baseline_path)
        followup_data, _ = loader.load_nifti(followup_path)
        load_elapsed = time.time() - stage_start

        # Stage 2: deformable registration of follow-up onto baseline.
        stage_start = time.time()
        reg_result = registrator.register(
            followup_data, baseline_data, use_deformable=True)
        reg_elapsed = time.time() - stage_start

        # Stage 3: change detection on the registered (warped) image.
        stage_start = time.time()
        change_result = detector.detect_changes(
            baseline_data, reg_result["warped_image"])
        detect_elapsed = time.time() - stage_start

        total_elapsed = time.time() - started

        return {
            "task_id": task_id,
            "status": "success",
            "load_time": load_elapsed,
            "reg_time": reg_elapsed,
            "detect_time": detect_elapsed,
            "total_time": total_elapsed,
            "data_shape": baseline_data.shape
        }

    except Exception as exc:
        return {
            "task_id": task_id,
            "status": "error",
            "error": str(exc),
            "total_time": time.time() - started
        }
| |
|
| |
|
def run_segmentation_task(task_id, nifti_path):
    """Run one organ-segmentation task (GPU-accelerated when available).

    Args:
        task_id: Identifier used in log output and the result dict.
        nifti_path: Path to the NIfTI volume to segment.

    Returns:
        On success: dict with total_time and gpu_peak_gb (0.0 on CPU).
        On failure: dict with status "error" and the exception message.
    """
    print(f" ๐ง ๅๅฒไปปๅก {task_id}: ๅผๅงๅค็")
    start_time = time.time()

    try:
        import torch
        # Pin this process's CUDA work to GPU 0 (process-wide side effect).
        os.environ['CUDA_VISIBLE_DEVICES'] = '0'

        from app.services.segmentation import OrganSegmentor
        segmentor = OrganSegmentor()

        from app.services.dicom import DicomLoader
        loader = DicomLoader()
        data, _ = loader.load_nifti(nifti_path)

        result = segmentor.segment(data)

        total_time = time.time() - start_time

        # FIX: torch.cuda.max_memory_allocated() raises on CPU-only torch
        # builds, which previously turned a *successful* CPU segmentation
        # into an "error" result. Guard on CUDA availability instead.
        if torch.cuda.is_available():
            peak_mem = torch.cuda.max_memory_allocated() / (1024**3)
        else:
            peak_mem = 0.0

        return {
            "task_id": task_id,
            "status": "success",
            "total_time": total_time,
            "gpu_peak_gb": peak_mem
        }

    except Exception as e:
        return {
            "task_id": task_id,
            "status": "error",
            "error": str(e),
            "total_time": time.time() - start_time
        }
| |
|
| |
|
def get_test_data_pairs(data_dir, max_pairs=5):
    """Collect up to *max_pairs* (baseline, followup) NIfTI path tuples.

    Scans ``<data_dir>/processed/real_lung_*`` case directories in
    sorted order, keeping only cases where both files exist.
    """
    processed_root = Path(data_dir) / "processed"
    candidates = sorted(processed_root.glob("real_lung_*"))[:max_pairs]

    found = []
    for case in candidates:
        baseline = case / "baseline.nii.gz"
        followup = case / "followup.nii.gz"
        # Skip incomplete cases (missing either timepoint).
        if baseline.exists() and followup.exists():
            found.append((str(baseline), str(followup)))
    return found
| |
|
| |
|
def print_stats(title, data_list):
    """Print mean / peak / min of a sample list; silent no-op when empty."""
    if not data_list:
        return
    samples = np.array(data_list)
    print(f" {title}:")
    print(f" ๅนณๅ: {np.mean(samples):.2f}")
    print(f" ๅณฐๅผ: {np.max(samples):.2f}")
    print(f" ๆๅฐ: {np.min(samples):.2f}")
| |
|
| |
|
def main():
    """Drive the full stress-test suite: baseline, 2/3-task concurrency, GPU.

    Mutates the module-level ``monitor_data`` / ``stop_monitor`` globals to
    coordinate with the resource_monitor background thread, then prints a
    summary report of observed resource peaks.
    """
    global stop_monitor, monitor_data

    print("=" * 70)
    print("๐ฅ NeuroScan AI ๅนถๅๅๅๆต่ฏ")
    print("=" * 70)

    # --- System configuration banner ---
    print(f"\n๐ ็ณป็ป้็ฝฎ:")
    print(f" CPU ๆ ธๅฟ: {psutil.cpu_count(logical=False)} ็ฉ็ๆ ธ / {psutil.cpu_count()} ้ป่พๆ ธ")
    print(f" ๆปๅๅญ: {psutil.virtual_memory().total / (1024**3):.1f} GB")

    try:
        import torch
        if torch.cuda.is_available():
            print(f" GPU: {torch.cuda.get_device_name(0)}")
            print(f" GPUๆพๅญ: {torch.cuda.get_device_properties(0).total_memory / (1024**3):.1f} GB")
    # NOTE(review): bare except also swallows KeyboardInterrupt here —
    # consider narrowing to Exception.
    except:
        print(" GPU: ไธๅฏ็จ")

    # --- Locate test data; at least 2 pairs are required ---
    data_dir = Path(__file__).parent.parent / "data"
    pairs = get_test_data_pairs(data_dir, max_pairs=5)

    if len(pairs) < 2:
        print("\nโ ๆต่ฏๆฐๆฎไธ่ถณ๏ผ้่ฆ่ณๅฐ 2 ๅฏนๆฐๆฎ")
        print(" ่ฏทๅ่ฟ่ก: python scripts/download_datasets.py --dataset learn2reg")
        return

    print(f"\n๐ ๆพๅฐ {len(pairs)} ๅฏนๆต่ฏๆฐๆฎ")

    # ------------------------------------------------------------------
    # Test 1: single-task baseline
    # ------------------------------------------------------------------
    print("\n" + "=" * 70)
    print("๐ ๆต่ฏ 1: ๅไปปๅกๅบๅๆต่ฏ")
    print("=" * 70)

    # Reset sample buffers and (re)arm the monitor thread.
    monitor_data = {k: [] for k in monitor_data}
    stop_monitor = False

    monitor_thread = threading.Thread(target=resource_monitor, args=(0.2,))
    monitor_thread.start()

    result = run_single_pipeline(1, pairs[0])

    stop_monitor = True
    monitor_thread.join()

    if result["status"] == "success":
        print(f"\n โ ๅไปปๅกๅฎๆ:")
        print(f" ๅ ่ฝฝๆถ้ด: {result['load_time']:.2f}s")
        print(f" ้ๅๆถ้ด: {result['reg_time']:.2f}s")
        print(f" ๆฃๆตๆถ้ด: {result['detect_time']:.2f}s")
        print(f" ๆปๆถ้ด: {result['total_time']:.2f}s")

    # NOTE(review): psutil.cpu_percent(interval=None) returns 0.0 on the
    # first sample, so very short runs can under-report the CPU peak.
    print(f"\n ๐ ๅไปปๅก่ตๆบๅณฐๅผ:")
    print(f" CPU ๅณฐๅผ: {max(monitor_data['cpu_percent']):.1f}%")
    print(f" ๅๅญๅณฐๅผ: {max(monitor_data['memory_gb']):.1f} GB ({max(monitor_data['memory_percent']):.1f}%)")
    print(f" GPUๆพๅญๅณฐๅผ: {max(monitor_data['gpu_memory_gb']):.2f} GB")

    # Baseline figures reused by the concurrency-efficiency calculations.
    single_task_time = result["total_time"]
    single_cpu_peak = max(monitor_data['cpu_percent'])
    single_mem_peak = max(monitor_data['memory_gb'])

    # ------------------------------------------------------------------
    # Test 2: two tasks in parallel
    # ------------------------------------------------------------------
    print("\n" + "=" * 70)
    print("๐ ๆต่ฏ 2: 2 ไปปๅกๅนถๅๅๅๆต่ฏ")
    print("=" * 70)

    monitor_data = {k: [] for k in monitor_data}
    stop_monitor = False

    monitor_thread = threading.Thread(target=resource_monitor, args=(0.2,))
    monitor_thread.start()

    start_time = time.time()
    results = []

    with ThreadPoolExecutor(max_workers=2) as executor:
        futures = []
        for i, pair in enumerate(pairs[:2]):
            futures.append(executor.submit(run_single_pipeline, i+1, pair))

        for future in as_completed(futures):
            results.append(future.result())

    concurrent_2_time = time.time() - start_time

    stop_monitor = True
    monitor_thread.join()

    success_count = sum(1 for r in results if r["status"] == "success")
    print(f"\n โ 2ไปปๅกๅนถๅๅฎๆ: {success_count}/2 ๆๅ")
    print(f" ๆป่ๆถ: {concurrent_2_time:.2f}s")
    # Parallel efficiency: (sequential-time estimate) / (actual wall time).
    print(f" ๅนถ่กๆ็: {(single_task_time * 2 / concurrent_2_time * 100):.1f}%")

    print(f"\n ๐ 2ไปปๅกๅนถๅ่ตๆบๅณฐๅผ:")
    print(f" CPU ๅณฐๅผ: {max(monitor_data['cpu_percent']):.1f}%")
    print(f" ๅๅญๅณฐๅผ: {max(monitor_data['memory_gb']):.1f} GB ({max(monitor_data['memory_percent']):.1f}%)")
    print(f" GPUๆพๅญๅณฐๅผ: {max(monitor_data['gpu_memory_gb']):.2f} GB")

    concurrent_2_cpu = max(monitor_data['cpu_percent'])
    concurrent_2_mem = max(monitor_data['memory_gb'])

    # ------------------------------------------------------------------
    # Test 3: three tasks in parallel (skipped when data is insufficient)
    # ------------------------------------------------------------------
    print("\n" + "=" * 70)
    print("๐ ๆต่ฏ 3: 3 ไปปๅกๅนถๅๅๅๆต่ฏ")
    print("=" * 70)

    if len(pairs) < 3:
        print(" โ ๏ธ ๆฐๆฎไธ่ถณ๏ผ่ทณ่ฟ 3 ไปปๅกๆต่ฏ")
    else:
        monitor_data = {k: [] for k in monitor_data}
        stop_monitor = False

        monitor_thread = threading.Thread(target=resource_monitor, args=(0.2,))
        monitor_thread.start()

        start_time = time.time()
        results = []

        with ThreadPoolExecutor(max_workers=3) as executor:
            futures = []
            for i, pair in enumerate(pairs[:3]):
                futures.append(executor.submit(run_single_pipeline, i+1, pair))

            for future in as_completed(futures):
                results.append(future.result())

        concurrent_3_time = time.time() - start_time

        stop_monitor = True
        monitor_thread.join()

        success_count = sum(1 for r in results if r["status"] == "success")
        print(f"\n โ 3ไปปๅกๅนถๅๅฎๆ: {success_count}/3 ๆๅ")
        print(f" ๆป่ๆถ: {concurrent_3_time:.2f}s")
        print(f" ๅนถ่กๆ็: {(single_task_time * 3 / concurrent_3_time * 100):.1f}%")

        print(f"\n ๐ 3ไปปๅกๅนถๅ่ตๆบๅณฐๅผ:")
        print(f" CPU ๅณฐๅผ: {max(monitor_data['cpu_percent']):.1f}%")
        print(f" ๅๅญๅณฐๅผ: {max(monitor_data['memory_gb']):.1f} GB ({max(monitor_data['memory_percent']):.1f}%)")
        print(f" GPUๆพๅญๅณฐๅผ: {max(monitor_data['gpu_memory_gb']):.2f} GB")

        # Only defined when this branch runs; the report below guards on dir().
        concurrent_3_cpu = max(monitor_data['cpu_percent'])
        concurrent_3_mem = max(monitor_data['memory_gb'])

    # ------------------------------------------------------------------
    # Test 4: GPU segmentation peak (requires CUDA)
    # ------------------------------------------------------------------
    print("\n" + "=" * 70)
    print("๐ ๆต่ฏ 4: GPU ๅๅฒไปปๅกๅณฐๅผๆต่ฏ")
    print("=" * 70)

    try:
        import torch
        if torch.cuda.is_available():
            # Clear the allocator's high-water mark so the reported peak
            # belongs to this test only.
            torch.cuda.reset_peak_memory_stats()

            monitor_data = {k: [] for k in monitor_data}
            stop_monitor = False

            monitor_thread = threading.Thread(target=resource_monitor, args=(0.2,))
            monitor_thread.start()

            seg_result = run_segmentation_task(1, pairs[0][0])

            stop_monitor = True
            monitor_thread.join()

            if seg_result["status"] == "success":
                print(f"\n โ ๅๅฒไปปๅกๅฎๆ:")
                print(f" ่ๆถ: {seg_result['total_time']:.2f}s")
                print(f" GPUๅณฐๅผ: {seg_result.get('gpu_peak_gb', max(monitor_data['gpu_memory_gb'])):.2f} GB")
            else:
                print(f"\n โ ๏ธ ๅๅฒไปปๅก่ทณ่ฟ: {seg_result.get('error', 'unknown')}")

            print(f"\n ๐ ๅๅฒไปปๅก่ตๆบๅณฐๅผ:")
            print(f" CPU ๅณฐๅผ: {max(monitor_data['cpu_percent']):.1f}%")
            print(f" ๅๅญๅณฐๅผ: {max(monitor_data['memory_gb']):.1f} GB")
            print(f" GPUๆพๅญๅณฐๅผ: {max(monitor_data['gpu_memory_gb']):.2f} GB")

            gpu_seg_peak = max(monitor_data['gpu_memory_gb'])
        else:
            print(" โ ๏ธ GPU ไธๅฏ็จ๏ผ่ทณ่ฟๅๅฒๆต่ฏ")
            gpu_seg_peak = 0
    except Exception as e:
        print(f" โ ๏ธ ๅๅฒๆต่ฏๅคฑ่ดฅ: {e}")
        gpu_seg_peak = 0

    # ------------------------------------------------------------------
    # Summary report
    # ------------------------------------------------------------------
    print("\n" + "=" * 70)
    print("๐ ๅๅๆต่ฏๆป็ปๆฅๅ")
    print("=" * 70)

    # NOTE(review): `'concurrent_3_cpu' in dir()` relies on dir() returning
    # main()'s local names — it works, but `locals()` would be clearer.
    print(f"""
โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ
โ NeuroScan AI ่ตๆบ้ๆฑๆฅๅ โ
โโโโโโโโโโโโโโโโโโฌโโโโโโโโโโโโโโโโฌโโโโโโโโโโโโโโโโฌโโโโโโโโโโโโโโโโโโโโค
โ ๆต่ฏๅบๆฏ โ CPU ๅณฐๅผ โ ๅๅญๅณฐๅผ โ GPU ๆพๅญๅณฐๅผ โ
โโโโโโโโโโโโโโโโโโโผโโโโโโโโโโโโโโโโผโโโโโโโโโโโโโโโโผโโโโโโโโโโโโโโโโโโโโค
โ ๅไปปๅก้ๅ โ {single_cpu_peak:>6.1f}% โ {single_mem_peak:>6.1f} GB โ ~0 GB (CPU) โ
โ 2ไปปๅกๅนถๅ โ {concurrent_2_cpu:>6.1f}% โ {concurrent_2_mem:>6.1f} GB โ ~0 GB (CPU) โ
โ 3ไปปๅกๅนถๅ โ {concurrent_3_cpu if 'concurrent_3_cpu' in dir() else 0:>6.1f}% โ {concurrent_3_mem if 'concurrent_3_mem' in dir() else 0:>6.1f} GB โ ~0 GB (CPU) โ
โ GPUๅๅฒไปปๅก โ ~50% โ ~8 GB โ {gpu_seg_peak:>6.1f} GB โ
โโโโโโโโโโโโโโโโโโโดโโโโโโโโโโโโโโโโดโโโโโโโโโโโโโโโโดโโโโโโโโโโโโโโโโโโโโค
โ ๆจ่็กฌไปถ้็ฝฎ โ
โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโค
โ ๆไฝ้็ฝฎ (ๅไปปๅก): 4ๆ ธ CPU, 8GB ๅๅญ, ๆ ้GPU โ
โ ๆ ๅ้็ฝฎ (2ๅนถๅ): 8ๆ ธ CPU, 16GB ๅๅญ, 12GB GPU (ๅฏ้) โ
โ ๆจ่้็ฝฎ (3ๅนถๅ): 16ๆ ธ CPU, 32GB ๅๅญ, 24GB GPU โ
โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ
""")

    print("โ ๅๅๆต่ฏๅฎๆ!")
| |
|
| |
|
# Script entry point: run the full stress-test suite.
if __name__ == "__main__":
    main()
| |
|
| |
|
| |
|