# # Use a pipeline as a high-level helper
# from transformers import pipeline
#
# messages = [
#     {"role": "user", "content": "Who are you?"},
# ]
# pipe = pipeline("text-generation", model="deepseek-ai/DeepSeek-R1-Distill-Qwen-7B")
# pipe(messages)
import torch

# Probe CUDA availability for this PyTorch build / machine.
gpu_available = torch.cuda.is_available()
gpu_count = torch.cuda.device_count()

if gpu_available:
    print(f"GPU 可用，数量: {gpu_count}")
    # Fix: the label means "current GPU", so query the active device index
    # instead of hard-coding 0 — on multi-GPU hosts the current device may
    # not be device 0.
    print(f"当前 GPU: {torch.cuda.get_device_name(torch.cuda.current_device())}")
else:
    print("没有可用的 GPU")