import torch
import torch.nn as nn
import torch.profiler
import time
import os
import copy
import sys
import argparse
from deepsignal3.mtms.mtm import MTM
from torch.cuda.amp import autocast
# 导入 ModelBiLSTM
try:
    from deepsignal3.models import ModelBiLSTM
except ImportError:
    print("WARNING: deepsignal3.models.ModelBiLSTM 未找到。", file=sys.stderr)
    ModelBiLSTM = None # 稍后检查

# Check for a CUDA device; this benchmark is designed for GPU execution.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
if device.type != 'cuda':
    print("WARNING: CUDA 设备不可用，torch.compile 无法发挥加速作用。请检查环境配置。", file=sys.stderr)
    # sys.exit(1)  # strict check kept from the original script (left disabled here)


# --- Compatibility & Dynamo configuration ---
try:
    import torch._dynamo as _dynamo
    # Allow Dynamo to capture ops whose output shapes depend on input data.
    _dynamo.config.capture_dynamic_output_shape_ops = True
    # Fall back to eager execution instead of raising on compile errors.
    _dynamo.config.suppress_errors = True
except Exception:
    # Older PyTorch builds without torch._dynamo: continue without these tweaks.
    pass

def maybe_mark_step():
    """
    尝试调用 torch.compiler.cudagraph_mark_step_begin()。
    """
    if hasattr(torch.compiler, 'cudagraph_mark_step_begin'):
        torch.compiler.cudagraph_mark_step_begin()

# --- 1. Global constants (they affect data generation) ---
SIGNAL_LEN_S = 15  # raw signal points per k-mer position
VOCAB_SIZE = 16
N_EMBED = 4


# --- 2. Helper functions: inputs and timing ---

def create_dataset_for_model(L_S, num_samples, model_type):
    """Create ``num_samples`` random input entries on the CPU.

    The returned tuple layout depends on ``model_type``:
      * 'mtm'  -> (signals, kmer, x_mask, t, x_static)
      * 'lstm' -> (kmer, base_means, base_stds, base_signal_lens, signals)

    L_S: *total* sequence length (e.g. 315)
    num_samples: number of data entries to generate (e.g. 1024)

    Raises ValueError for an unknown ``model_type``, or (lstm only) when
    ``L_S`` is not a multiple of ``SIGNAL_LEN_S``.
    """
    print(f"    Creating {model_type} dataset (N={num_samples}, L_S={L_S}) on CPU...")

    if model_type == 'mtm':
        # MTM inputs: (signals, kmer, x_mask, t, x_static).
        # randn/randint already produce float32/int64, so the redundant
        # .float()/.long() casts were dropped; dtypes are stated explicitly
        # where they differ from the default.
        signals = torch.randn(num_samples, L_S, 1)                  # float32
        kmer = torch.randint(0, VOCAB_SIZE, (num_samples, L_S))     # int64
        C_MTM = 1 + N_EMBED
        x_mask = torch.zeros(num_samples, L_S, C_MTM, dtype=torch.bool)
        t = torch.arange(L_S).repeat(num_samples, 1)                # int64 positions
        x_static = torch.randint(0, 5, (num_samples, 1))            # int64
        # Return CPU tensors
        return signals, kmer, x_mask, t, x_static

    elif model_type == 'lstm':
        # LSTM inputs: (kmer, base_means, base_stds, base_signal_lens, signals)
        # L_S (e.g. 315) is the total length (seq_len * SIGNAL_LEN_S);
        # validate divisibility before deriving the per-k-mer length.
        if L_S % SIGNAL_LEN_S != 0:
            raise ValueError(f"L_S ({L_S}) 不是 SIGNAL_LEN_S ({SIGNAL_LEN_S}) 的整数倍")
        seq_len = L_S // SIGNAL_LEN_S

        # 1. kmer [N, seq_len]
        kmer = torch.randint(0, VOCAB_SIZE, (num_samples, seq_len))

        # 2. base_means [N, seq_len, 1]
        base_means = torch.randn(num_samples, seq_len, 1)

        # 3. base_stds [N, seq_len, 1] — std must be positive, hence rand.
        base_stds = torch.rand(num_samples, seq_len, 1)

        # 4. base_signal_lens [N, seq_len, 1] — cast needed: randint is int64.
        base_signal_lens = torch.randint(5, 25, (num_samples, seq_len, 1)).float()

        # 5. signals [N, seq_len, SIGNAL_LEN_S]: one signal feature vector
        #    per k-mer position.
        signals = torch.randn(num_samples, seq_len, SIGNAL_LEN_S)

        # Return CPU tensors
        return kmer, base_means, base_stds, base_signal_lens, signals

    else:
        raise ValueError(f"Unknown model_type for dataset creation: {model_type}")


# (Profiler 辅助函数 _safe_get_time_from_evt, _select_sort_key_for_table 保持不变)
def _safe_get_time_from_evt(evt):
    """ (与原脚本相同) """
    # ... (代码不变) ...
    cpu_candidates = [
        'cpu_time_total', 'self_cpu_time_total', 'cpu_time', 'self_cpu_time'
    ]
    cuda_candidates = [
        'cuda_time_total', 'self_cuda_time_total', 'cuda_time', 'self_cuda_time'
    ]
    cpu_us = 0
    cuda_us = 0
    for name in cpu_candidates:
        if hasattr(evt, name):
            val = getattr(evt, name)
            if val is not None:
                cpu_us = float(val)
                break
    for name in cuda_candidates:
        if hasattr(evt, name):
            val = getattr(evt, name)
            if val is not None:
                cuda_us = float(val)
                break
    return cpu_us, cuda_us

def _select_sort_key_for_table(prof):
    """ (与原脚本相同) """
    # ... (代码不变) ...
    candidates = ["self_cuda_time_total", "cuda_time_total", "cpu_time_total", "self_cpu_time_total"]
    kavg = prof.key_averages()
    if len(kavg) == 0:
        return None
    evt = kavg[0]
    for c in candidates:
        if hasattr(evt, c):
            return c
    return None

def _forward_one_batch(model, dataset, model_type, num_samples, batch_size):
    """Run a single no-grad inference step on one randomly sampled batch.

    Samples ``batch_size`` indices on the CPU, moves the corresponding
    tensors from ``dataset`` to the global ``device``, and calls ``model``
    with the argument layout that ``model_type`` ('mtm' or 'lstm') requires.
    Unknown model types are a silent no-op (matching the original inline
    dispatch, which had no else branch).
    """
    with torch.no_grad():
        maybe_mark_step()
        # 1. Sample random batch indices on the CPU.
        indices = torch.randint(0, num_samples, (batch_size,), device='cpu')
        # 2. Move the sampled slice of every dataset tensor to the device.
        batch = tuple(t[indices].to(device) for t in dataset)
        if model_type == 'mtm':
            # MTM forward: (signals, kmer, x_mask, t, x_static, flag, flag)
            model(*batch, True, True)
        elif model_type == 'lstm':
            # LSTM forward: (kmer, base_means, base_stds, base_signal_lens, signals)
            model(*batch)


def run_profile(model, dataset, name, model_type, iterations=10, warmups=3, batch_size=16):
    """Benchmark and profile ``model`` on random batches drawn from ``dataset``.

    dataset: tuple of CPU tensors; ``dataset[0].shape[0]`` is the sample count
        and ``dataset[0].shape[1]`` the (total) sequence length reported.
    name: label used for printed output and the TensorBoard trace directory.
    model_type: 'mtm' or 'lstm' — selects how a batch is unpacked and fed.
    iterations / warmups: timed and untimed forward-pass counts.
    batch_size: clamped down to the dataset size when it is larger.

    The previously triplicated sample-and-forward code (warm-up, timing and
    profiler loops) is factored into ``_forward_one_batch``.
    """
    # NOTE(review): torch.cuda.amp.autocast is deprecated in favor of
    # torch.amp.autocast('cuda', ...); kept here to preserve behavior.
    with autocast(dtype=torch.float16):
        # Total number of samples available in the dataset.
        num_samples = dataset[0].shape[0]
        if num_samples < batch_size:
            print(f"Warning: num_samples ({num_samples}) < batch_size ({batch_size}). Using num_samples as batch_size.")
            batch_size = num_samples

        # Warm-up phase (not timed).
        for _ in range(warmups):
            _forward_one_batch(model, dataset, model_type, num_samples, batch_size)

        if device.type == 'cuda':
            torch.cuda.synchronize()

        # Timing phase: CUDA events on GPU, wall clock as CPU fallback.
        start_event = torch.cuda.Event(enable_timing=True) if device.type == 'cuda' else None
        end_event = torch.cuda.Event(enable_timing=True) if device.type == 'cuda' else None

        if device.type == 'cuda':
            start_event.record()

        t0_cpu = time.time()  # fallback CPU timer
        for _ in range(iterations):
            _forward_one_batch(model, dataset, model_type, num_samples, batch_size)

        if device.type == 'cuda':
            end_event.record()
            torch.cuda.synchronize()
            elapsed_time_ms = start_event.elapsed_time(end_event) / iterations
        else:
            elapsed_time_ms = (time.time() - t0_cpu) * 1000.0 / iterations

        # Make sure the log directory exists for the trace handler.
        log_dir = "./log"
        os.makedirs(log_dir, exist_ok=True)

        # Detailed profiler run (separate from the timing loop above).
        try:
            prof = torch.profiler.profile(
                activities=[
                    torch.profiler.ProfilerActivity.CPU,
                    torch.profiler.ProfilerActivity.CUDA,
                ],
                schedule=torch.profiler.schedule(wait=1, warmup=2, active=5),
                on_trace_ready=torch.profiler.tensorboard_trace_handler(os.path.join(log_dir, f'profile_{name}')),
                record_shapes=True,
                profile_memory=True,
            )
        except Exception as e:
            print(f"Error creating profiler: {e}. Profiler-specific metrics will be skipped.")
            prof = None

        if prof:
            with prof as p:
                # Step count must cover the schedule: wait=1 + warmup=2 + active=5.
                total_steps = 1 + 2 + 5
                for _ in range(total_steps):
                    _forward_one_batch(model, dataset, model_type, num_samples, batch_size)
                    p.step()

        # L_S is inferred from dataset[0] (signals or kmer), shape[1].
        L_S_actual = dataset[0].shape[1]
        print(f"\n======== {name} (L_S={L_S_actual}, B={batch_size}) ========")
        print(f"**GPU/CPU 计时平均总延迟: {elapsed_time_ms:.3f} ms/iter**")

        if not prof:
            return

        # Summary table, sorted by whichever column this build supports.
        sort_key = _select_sort_key_for_table(prof)
        try:
            if sort_key is not None:
                print(prof.key_averages().table(sort_by=sort_key, row_limit=10))
            else:
                print(prof.key_averages().table(row_limit=10))
        except Exception:
            try:
                print(prof.key_averages())
            except Exception:
                pass

        # Approximate CPU/CUDA totals aggregated over all events
        # (skipping the synthetic ProfilerStep markers).
        cpu_time_total_us = 0.0
        cuda_time_total_us = 0.0
        for evt in prof.key_averages():
            if "ProfilerStep" in evt.key:
                continue
            c_us, g_us = _safe_get_time_from_evt(evt)
            cpu_time_total_us += c_us
            cuda_time_total_us += g_us

        # Fallback for builds whose key_averages lack CUDA columns.
        if cuda_time_total_us == 0:
            try:
                for ev in prof.events():
                    if hasattr(ev, 'self_cpu_time_us'):
                        cpu_time_total_us += float(ev.self_cpu_time_us or 0.0)
                    if hasattr(ev, 'self_cuda_time_us'):
                        cuda_time_total_us += float(ev.self_cuda_time_us or 0.0)
            except Exception:
                pass

        print(f"Profiler 总时间 (approx): CPU={cpu_time_total_us/1000.0:.3f} ms, CUDA={cuda_time_total_us/1000.0:.3f} ms")
        print(f"(注意：Profiler 时间单位/精度随 PyTorch 版本而异)")


# --- 3. Compilation strategies ---

def _compile_strategy_mtm(model_to_compile, compile_mode="reduce-overhead"):
    """
    MTM 专属：选择性编译策略。
    """
    print(f"Applying 'selective' compile strategy (for MTM)...")
    
    compiled_model = copy.deepcopy(model_to_compile)
    compiled_model.eval()
    
    print(f"Compiling submodules using mode='{compile_mode}'...")

    if hasattr(compiled_model, 'inp_layer'):
        compiled_model.inp_layer = torch.compile(compiled_model.inp_layer, mode=compile_mode)
        print(" -> Compiled: inp_layer")

    if hasattr(compiled_model, 'mixers'):
        for i in range(len(compiled_model.mixers)):
            compiled_model.mixers[i] = torch.compile(compiled_model.mixers[i], mode=compile_mode)
        print(f" -> Compiled: {len(compiled_model.mixers)} TokenMixingLayer modules (mixers)")
    
    if hasattr(compiled_model, 'samplers'):
        print(f" -> Skipping compilation for {len(compiled_model.samplers)} DownsampleLayer modules (samplers) due to dynamic shapes.")
    
    if hasattr(compiled_model, 'cls_head'):
        compiled_model.cls_head = torch.compile(compiled_model.cls_head, mode=compile_mode)
        print(" -> Compiled: cls_head")
        
    return compiled_model

def _compile_strategy_generic(model_to_compile, compile_mode="reduce-overhead"):
    """
    其他模型的通用编译策略：编译整个模型。
    """
    print(f"Applying 'all' compile strategy (for {model_to_compile.__class__.__name__})...")
    compiled_model = copy.deepcopy(model_to_compile)
    compiled_model.eval()
    
    compiled_model = torch.compile(compiled_model, mode=compile_mode)
    print(f" -> Compiled: entire {model_to_compile.__class__.__name__} model")
    
    return compiled_model

# --- 4. 模型工厂 ---

def get_model_and_compile_strategy(args, seq_len):
    """Return ``(base_model, compile_strategy_fn)`` for ``args.model``.

    ``seq_len`` (e.g. 21 or 5) is required to initialize ModelBiLSTM; the
    MTM path ignores it. Exits the process when 'lstm' is requested but
    ModelBiLSTM could not be imported; raises ValueError for unknown types.
    """
    if args.model == 'mtm':
        # --- MTM-specific parameters ---
        print(f"Loading MTM model (D_MODEL={args.d_model}, NUM_LAYERS={args.num_layers})")
        base_model = MTM(
            num_chn=1 + N_EMBED,
            d_static=1,
            num_cls=2,
            ratios=[2] * args.num_layers,  # one downsample ratio per layer
            d_model=args.d_model,
            r_hid=args.num_layers,
            drop=0.2,
            norm_first=True,
            down_mode='concat',
            vocab_size=VOCAB_SIZE,
            embedding_size=N_EMBED,
            use_channel_attn=True,
            use_mixer=True,
            moe=True,
            use_swiglu=True,
        )
        # Whole-model compilation when --complete-try is set, otherwise the
        # selective per-submodule strategy.
        strategy = _compile_strategy_generic if args.complete_try else _compile_strategy_mtm
        return base_model, strategy

    if args.model == 'lstm':
        # --- ModelBiLSTM interface ---
        if ModelBiLSTM is None:
            print("ERROR: ModelBiLSTM 未成功导入，无法继续。", file=sys.stderr)
            sys.exit(1)

        print(f"Loading ModelBiLSTM (SEQ_LEN={seq_len}, HID_RNN={args.d_model}, NUM_LAYERS={args.num_layers})")

        # Map d_model / num_layers onto ModelBiLSTM's parameters
        # (guessed from the train_worker example).
        base_model = ModelBiLSTM(
            seq_len=seq_len,             # <--- fixed length
            signal_len=SIGNAL_LEN_S,
            num_layers1=3,               # <--- mapped
            num_layers2=1,               # (fixed at 1)
            num_classes=2,               # (fixed at 2)
            dropout_rate=0.5,
            hidden_size=args.d_model,    # <--- mapped
            vocab_size=VOCAB_SIZE,
            embedding_size=N_EMBED,
            is_base=True,
            is_signallen=True,
            is_trace=True,
            module='both_bilstm',
        )
        return base_model, _compile_strategy_generic

    raise ValueError(f"Unknown model type: {args.model}")


# --- 5. 主执行函数 ---

def _random_device_batch(dataset, num_samples, batch_size):
    """Sample one random batch from a CPU dataset and move it to ``device``."""
    indices = torch.randint(0, num_samples, (batch_size,), device='cpu')
    return tuple(t[indices].to(device) for t in dataset)


def _profile_lstm_variant(args, tag, seq_len, L_S, ver_major, compile_mode):
    """Build one fixed-length ModelBiLSTM and profile it (baseline + compiled).

    tag: 'ORIG' or 'NEW' — used only for printed labels and profile names.
    seq_len / L_S: per-k-mer sequence length and total input length.
    ver_major: torch major version; compilation is skipped below 2.
    """
    print(f"\n--- 处理 L_{tag} (L_S={L_S}) ---")
    model, compile_strategy = get_model_and_compile_strategy(args, seq_len)
    model.to(device)
    model.eval()
    dataset = create_dataset_for_model(L_S, args.num_samples, 'lstm')

    print(f"\n#################### 模式 1: L_{tag} 基线 (无编译) ####################")
    run_profile(model, dataset, f"LSTM_{tag}_NO_COMPILE", 'lstm',
                iterations=args.iterations, warmups=args.warmups, batch_size=args.batch_size)

    if ver_major >= 2:
        print(f"\n#################### 模式 2: L_{tag} Torch.Compile 优化 ####################")
        compiled_model = compile_strategy(model, compile_mode=compile_mode)

        print(f"\nRunning initial compilation forward pass (L_{tag})...")
        with torch.no_grad():
            # One forward pass to pay the compilation cost up front.
            compiled_model(*_random_device_batch(dataset, args.num_samples, args.batch_size))

        run_profile(compiled_model, dataset, f"LSTM_{tag}_COMPILED", 'lstm',
                    iterations=args.iterations, warmups=args.warmups, batch_size=args.batch_size)


def main(args):
    """Entry point: run the MTM (variable-length) or LSTM (fixed-length)
    profiling path selected by ``args.model``.

    The previously duplicated L_ORIG / L_NEW LSTM sections are factored
    into ``_profile_lstm_variant``; batch staging for the pre-compilation
    pass is shared via ``_random_device_batch``.
    """
    print(f"--- 模型性能分析 (设备: {device}, 模型: {args.model}) ---")
    print(f"Params: Iter={args.iterations}, Warmup={args.warmups}, Batch={args.batch_size}, NumSamples={args.num_samples}")
    print(f"SeqLens: Orig={args.seq_len_orig}, New={args.seq_len_new}")

    # Total input lengths: sequence positions * signal points per position.
    L_S_ORIG = args.seq_len_orig * SIGNAL_LEN_S
    L_S_NEW = args.seq_len_new * SIGNAL_LEN_S

    # torch.compile requires PyTorch >= 2.0.
    try:
        ver_major = int(torch.__version__.split('.')[0])
    except Exception:
        ver_major = 0

    compile_mode = "reduce-overhead"

    # --- MTM path (variable length: one model, two input lengths) ---
    if args.model == 'mtm':
        print("\n[MTM 路径]: 创建 1 个模型，测试 2 种长度")

        # 1. Instantiate a single model.
        base_model, compile_strategy = get_model_and_compile_strategy(args, args.seq_len_orig)
        base_model.to(device)
        base_model.eval()

        # 2. Prepare the two datasets.
        print(f"Creating inputs: L_ORIG={L_S_ORIG}, L_NEW={L_S_NEW}")
        DATASET_ORIG = create_dataset_for_model(L_S_ORIG, args.num_samples, 'mtm')
        DATASET_NEW = create_dataset_for_model(L_S_NEW, args.num_samples, 'mtm')

        # 3. Baseline (no compilation).
        print("\n#################### 模式 1: 基线 (无编译) ####################")
        run_profile(base_model, DATASET_ORIG, "MTM_ORIG_NO_COMPILE", 'mtm',
                    iterations=args.iterations, warmups=args.warmups, batch_size=args.batch_size)
        run_profile(base_model, DATASET_NEW, "MTM_NEW_NO_COMPILE", 'mtm',
                    iterations=args.iterations, warmups=args.warmups, batch_size=args.batch_size)

        # 4. Compiled run.
        if ver_major >= 2:
            print(f"\n#################### 模式 2: Torch.Compile 优化 (MTM 策略) ####################")

            compiled_model = compile_strategy(base_model, compile_mode=compile_mode)

            print("\nRunning initial compilation forward pass...")
            with torch.no_grad():
                # Pre-compile with one L_ORIG batch; the trailing flags match
                # the MTM forward signature used inside run_profile.
                batch = _random_device_batch(DATASET_ORIG, args.num_samples, args.batch_size)
                compiled_model(*batch, True, True)

            run_profile(compiled_model, DATASET_ORIG, "MTM_ORIG_COMPILED", 'mtm',
                        iterations=args.iterations, warmups=args.warmups, batch_size=args.batch_size)
            run_profile(compiled_model, DATASET_NEW, "MTM_NEW_COMPILED", 'mtm',
                        iterations=args.iterations, warmups=args.warmups, batch_size=args.batch_size)
        else:
            print(f"WARNING: PyTorch 版本 ({torch.__version__}) 低于 2.0，无法运行 torch.compile。")

    # --- LSTM path (fixed length: one model per length) ---
    elif args.model == 'lstm':
        print("\n[LSTM 路径]: 创建 2 个模型 (L_ORIG, L_NEW)，分别测试")

        _profile_lstm_variant(args, "ORIG", args.seq_len_orig, L_S_ORIG, ver_major, compile_mode)
        _profile_lstm_variant(args, "NEW", args.seq_len_new, L_S_NEW, ver_major, compile_mode)

        if ver_major < 2:
            print(f"WARNING: PyTorch 版本 ({torch.__version__}) 低于 2.0，无法运行 torch.compile。")


# --- 6. ArgParse 入口 ---

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="模型编译性能分析脚本")
    
    parser.add_argument('--model', type=str, default='mtm', choices=['mtm', 'lstm'],
                        help='要分析的模型类型 (mtm 或 lstm)')
    parser.add_argument('--complete-try', action="store_true", default=False, 
                        help="Whether to use compeleted compiled model in MTM. Default: False")
    # Profiler 参数
    parser.add_argument('--iterations', type=int, default=100,
                        help='Profiler 计时阶段的迭代 (批次) 次数')
    parser.add_argument('--warmups', type=int, default=10,
                        help='Profiler 计时前的预热迭代 (批次) 次数')
    
    # 数据集和批次大小
    parser.add_argument('--num-samples', type=int, default=1024,
                        help='生成的随机数据集的总大小 (总条目数)')
    parser.add_argument('--batch-size', type=int, default=16,
                        help='批处理大小 (Batch Size)')
    
    # 序列长度参数
    parser.add_argument('--seq-len-orig', type=int, default=21,
                        help='原始序列长度 (SEQ_LEN_ORIG)')
    parser.add_argument('--seq-len-new', type=int, default=5,
                        help='缩短后的序列长度 (SEQ_LEN_NEW)')

    # 模型超参数 (通用)
    parser.add_argument('--d-model', type=int, default=128,
                        help='模型维度 (MTM: d_model, LSTM: hid_rnn)')
    parser.add_argument('--num-layers', type=int, default=4,
                        help='模型层数 (MTM: r_hid, LSTM: layernum1)')
    
    args = parser.parse_args()
    
    # 检查设备
    if device.type != 'cuda':
        print("CRITICAL: CUDA device not found. Exiting.", file=sys.stderr)
        sys.exit(1)
        
    main(args)