import base64
import hashlib
import hmac
import os
import random
import time

import torch

from app.logger_config import logger as logging

def debug_current_device():
    """Safely logs GPU or CPU information without crashing on stateless GPU."""
    logging.debug("=== Debugging current device ===")
    try:
        if torch.cuda.is_available():
            device_name = torch.cuda.get_device_name(0)
            memory_allocated = torch.cuda.memory_allocated(0) / (1024 ** 2)
            memory_reserved = torch.cuda.memory_reserved(0) / (1024 ** 2)
            memory_total = torch.cuda.get_device_properties(0).total_memory / (1024 ** 2)
            capability = torch.cuda.get_device_capability(0)
            current_device = torch.cuda.current_device()
            logging.debug(f"GPU name          : {device_name}")
            logging.debug(f"Current device ID : {current_device}")
            logging.debug(f"CUDA capability   : {capability}")
            logging.debug(f"Memory allocated  : {memory_allocated:.2f} MB")
            logging.debug(f"Memory reserved   : {memory_reserved:.2f} MB")
            logging.debug(f"Total memory      : {memory_total:.2f} MB")
        else:
            logging.debug("No GPU detected, running on CPU")
    except RuntimeError as e:
        # Handles the Hugging Face Spaces "Stateless GPU" restriction
        if "CUDA must not be initialized" in str(e):
            logging.warning("⚠️ Skipping CUDA info: Stateless GPU environment detected.")
        else:
            logging.error(f"Unexpected CUDA error: {e}")

def get_current_device():
    """Return the usable device and its name, falling back to CPU when CUDA is unavailable or restricted."""
    device = torch.device("cpu")
    device_name = "CPU"
    try:
        if torch.cuda.is_available():
            device = torch.device("cuda")
            device_name = torch.cuda.get_device_name(0)
        # Probe the device with a tiny allocation to make sure it is actually usable.
        torch.tensor([0], dtype=torch.float32, device=device)
        if torch.cuda.is_available():
            torch.cuda.empty_cache()
            logging.debug("GPU cache cleared")
    except RuntimeError as e:
        if "CUDA must not be initialized" in str(e):
            device = torch.device("cpu")
            device_name = "CPU (stateless GPU mode)"
        else:
            logging.error(f"Unexpected CUDA error: {e}")
            device = torch.device("cpu")
            device_name = "CPU (CUDA error fallback)"
    return device, device_name
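
# A minimal sketch (hypothetical, not used elsewhere in this file) of how the helper
# above is typically consumed: pick the device once, then move modules and tensors
# onto it.
#
#     device, device_name = get_current_device()
#     model = torch.nn.Linear(4, 2).to(device)
#     logging.debug(f"Model placed on {device_name}")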

def generate_coturn_config():
    """
    Generate a complete Coturn configuration with dynamic authentication (use-auth-secret).

    Returns:
        dict: coturn_config object ready to be used by the WebRTC client.
    """
    secret_key = os.getenv("TURN_SECRET_KEY", "your_secret_key")
    ttl = int(os.getenv("TURN_TTL", 3600))
    turn_url = os.getenv("TURN_URL", "turn:*******")
    turn_s_url = os.getenv("TURN_S_URL", "turns:*****")
    user = os.getenv("TURN_USER", "client")

    # Ephemeral TURN credentials: the username embeds an expiry timestamp and the
    # credential is the base64-encoded HMAC-SHA1 of that username with the shared secret.
    timestamp = int(time.time()) + ttl
    username = f"{timestamp}:{user}"
    password = base64.b64encode(
        hmac.new(secret_key.encode(), username.encode(), hashlib.sha1).digest()
    ).decode()

    coturn_config = {
        "iceServers": [
            {
                "urls": [
                    turn_url,
                    turn_s_url,
                ],
                "username": username,
                "credential": password,
            }
        ]
    }
    return coturn_config
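
# Note (deployment assumption, not enforced by this code): for these ephemeral
# credentials to be accepted, the Coturn server is expected to run with
# `use-auth-secret` enabled and its `static-auth-secret` set to the same value
# as TURN_SECRET_KEY.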

def raise_function():
    """Raise an error randomly (1 out of 50 calls)."""
    if random.randint(1, 50) == 1:
        raise RuntimeError("Random failure triggered!")
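
# A minimal usage sketch (assumption: this module can be run directly; the module
# path is hypothetical and should match wherever this file actually lives,
# e.g. `python -m app.device_utils`). It simply exercises the helpers defined above.
if __name__ == "__main__":
    debug_current_device()

    device, device_name = get_current_device()
    logging.debug(f"Selected device: {device} ({device_name})")

    # The returned dict has the same shape as a browser RTCPeerConnection
    # `iceServers` configuration, with short-lived credentials derived from
    # TURN_SECRET_KEY.
    coturn_config = generate_coturn_config()
    logging.debug(f"Generated Coturn config: {coturn_config}")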