#!/usr/bin/env python3

# Copyright © 2025 Wenze Wei
#
# This file is part of Pisces L1.
#
# Licensed under the Creative Commons Attribution-NonCommercial 4.0 International License (CC BY-NC 4.0).
# You may not use this file except in compliance with the License.
# Commercial use is strictly prohibited.
# You may obtain a copy of the License at
#
#     https://creativecommons.org/licenses/by-nc/4.0/
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import sys
import torch

def check(args=None, extra=None):
    """
    Check GPU availability and status, then verify basic tensor operations.

    Prints PyTorch/CUDA availability, per-GPU properties (name, total
    memory, compute capability), and live memory statistics for GPU 0,
    then runs a small matrix multiplication on the selected device to
    confirm that tensor operations actually work.

    Args:
        args (optional): Unused; kept for entry-point compatibility. Defaults to None.
        extra (optional): Unused; kept for entry-point compatibility. Defaults to None.

    Returns:
        bool: True if the test tensor operation succeeds, False otherwise.
    """
    print("✅\tGPU Status Check")
    print("✅\t" + "=" * 50)
    print(f"✅\tPyTorch CUDA available: {torch.cuda.is_available()}")

    if torch.cuda.is_available():
        print(f"✅\tCUDA version: {torch.version.cuda}")
        print(f"✅\tNumber of GPUs: {torch.cuda.device_count()}")

        for i in range(torch.cuda.device_count()):
            props = torch.cuda.get_device_properties(i)
            print(f"✅\tGPU {i}: {props.name}")
            # total_memory is reported in bytes; convert to GiB for display.
            print(f"✅\tMemory: {props.total_memory / 1024**3:.1f} GB")
            print(f"✅\tCompute Capability: {props.major}.{props.minor}")

            if i == 0:
                # Report live allocator statistics for the primary GPU only.
                torch.cuda.empty_cache()
                allocated = torch.cuda.memory_allocated(i) / 1024**3
                cached = torch.cuda.memory_reserved(i) / 1024**3
                print(f"✅\tAllocated: {allocated:.2f} GB")
                print(f"✅\tCached: {cached:.2f} GB")
    else:
        print("❌\tNo CUDA-capable GPU found")
        print("❌\tTraining will use CPU (slower but functional)")

    print("✅\t" + "=" * 50)
    print("✅\tTesting tensor operations...")

    try:
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        # Allocate directly on the target device instead of creating on CPU
        # and copying with .to() — avoids a redundant allocation + transfer.
        x = torch.randn(1000, 1000, device=device)
        y = torch.randn(1000, 1000, device=device)
        z = torch.mm(x, y)
        # CUDA kernels launch asynchronously: without a synchronization
        # point, a failing kernel would not be caught by this try block.
        # Synchronizing (and materializing one value on the host) forces
        # any deferred CUDA error to surface here.
        if device.type == "cuda":
            torch.cuda.synchronize()
        _ = z.sum().item()
        print(f"✅\tTensor operations successful on {device}")
    except Exception as e:
        print(f"❌\tTensor operations failed: {e}")
        return False
    return True