import torch
import subprocess

def print_gpu_usage():
    """Print the current CUDA device's name and its allocated/reserved memory in GB.

    Falls back to a plain message when no CUDA device is available.
    """
    if not torch.cuda.is_available():
        print("CUDA is not available. GPU cannot be used.")
        return

    dev = torch.cuda.current_device()
    gib = 1024 ** 3  # bytes per GiB
    allocated_gb = torch.cuda.memory_allocated(dev) / gib
    reserved_gb = torch.cuda.memory_reserved(dev) / gib
    print(f"GPU: {torch.cuda.get_device_name(dev)}")
    print(f"Memory Allocated: {allocated_gb:.2f} GB")
    print(f"Memory Reserved: {reserved_gb:.2f} GB")


def print_gpu_smi():
    """Query per-GPU memory usage via ``nvidia-smi`` and print one line per GPU.

    Prints "Failed to run nvidia-smi" when the tool exits non-zero or is not
    installed (previously an absent binary raised FileNotFoundError instead of
    reaching the failure message).
    """
    try:
        result = subprocess.run(
            ['nvidia-smi', '--query-gpu=memory.used,memory.total', '--format=csv,noheader'],
            capture_output=True, text=True)
    except FileNotFoundError:
        # nvidia-smi not on PATH (e.g. CPU-only machine) — same fallback as a failed run.
        print("Failed to run nvidia-smi")
        return

    if result.returncode == 0:
        for idx, info in enumerate(result.stdout.strip().split('\n')):
            # Fields arrive as "1234 MiB, 24576 MiB"; strip the space after the comma.
            used, total = (field.strip() for field in info.split(','))
            print(f"GPU {idx}: {used} / {total}")
    else:
        print("Failed to run nvidia-smi")

def get_optimizer_grouped_parameters(
    model,
    weight_decay,
    lora_lr=5e-4,
    # Tuples instead of lists: mutable default arguments are shared across
    # calls and are a classic Python pitfall. Membership tests behave identically.
    no_decay_name_list=(
        "bias", "layer_norm.weight", "layernorm.weight", "norm.weight",
        "ln_f.weight"
    ),
    lora_name_list=("lora_right_weight", "lora_left_weight"),
):
    """Split a model's trainable parameters into optimizer groups.

    Groups (empty groups are dropped from the result):
      1. regular parameters            -> ``weight_decay``
      2. LoRA parameters               -> ``weight_decay`` and a dedicated ``lora_lr``
      3. no-decay parameters (biases,
         layer norms, ...)             -> weight decay 0.0

    Args:
        model: any object exposing ``named_parameters()`` (e.g. ``torch.nn.Module``).
        weight_decay: decay applied to groups 1 and 2.
        lora_lr: learning rate override for LoRA parameters.
        no_decay_name_list: lowercase substrings marking parameters that get no decay.
        lora_name_list: lowercase substrings marking LoRA parameters.

    Returns:
        A list of parameter-group dicts suitable for a torch optimizer.
    """

    def _is_no_decay(name):
        # Name matching is case-insensitive on the parameter's qualified name.
        return any(nd in name.lower() for nd in no_decay_name_list)

    def _is_lora(name):
        return any(nd in name.lower() for nd in lora_name_list)

    optimizer_grouped_parameters = [
        {
            "params": [
                p for n, p in model.named_parameters()
                if p.requires_grad and not _is_no_decay(n) and not _is_lora(n)
            ],
            "weight_decay": weight_decay,
        },
        {
            "params": [
                p for n, p in model.named_parameters()
                if p.requires_grad and not _is_no_decay(n) and _is_lora(n)
            ],
            "weight_decay": weight_decay,
            "lr": lora_lr,
        },
        {
            "params": [
                p for n, p in model.named_parameters()
                if p.requires_grad and _is_no_decay(n)
            ],
            "weight_decay": 0.0,
        },
    ]

    # Optimizers reject empty parameter groups, so filter them out.
    return [group for group in optimizer_grouped_parameters if group["params"]]
