#!/bin/bash
# Detect the available accelerator (NVIDIA CUDA, AMD ROCm, Intel Arc, or CPU
# only) and install a matching PyTorch build from the appropriate wheel index.

# Global flag read by callers; overwritten to "nvidia_gpu" inside
# install_torch_win when an NVIDIA driver with a readable CUDA version is found.
export ARCH_AND_GPU_INFO="cpu"

#######################################
# Choose a PyTorch wheel index based on detected GPU hardware and install
# torch/torchvision/torchaudio via conda_run_pip_install (defined elsewhere).
# Globals:
#   INSTALL_TORCH_VERSION (read)    - optional exact torch version override
#   INSTALL_TORCH_INDEX_PATH (read) - index path suffix used with the override
#   HAS_INTEL_ARC_GPU (read)        - "1" when an Intel Arc GPU is present
#   TMP_TORCH_VERSION (exported)    - "==<version>" pin passed to pip, or ""
#   ARCH_AND_GPU_INFO (exported)    - set to "nvidia_gpu" when CUDA is usable
#   INDEX_PARAM (written)           - the pip index flag actually used
# Arguments:
#   $1 - base URL prefix for the CUDA/XPU wheel indexes
#   $2 - "page" to pass the index via -f, anything else uses --index-url
#######################################
install_torch_win() {
    local PYTORCH_CUDA_PREFIX=$1
    local INDEX_TYPE=$2
    export TMP_TORCH_VERSION=""
    local TORCH_INDEX_URL="https://download.pytorch.org/whl/nightly/cpu"
    # Initialize locally so a stale global cuda_version from a previous call
    # cannot leak into the detection below.
    local nvidia_output exit_code cuda_version=""
    if ! command -v nvidia-smi &> /dev/null; then
        export TMP_TORCH_VERSION="==2.7.1"
        if [ -n "$INSTALL_TORCH_VERSION" ]; then
            export TMP_TORCH_VERSION="==$INSTALL_TORCH_VERSION"
        fi
        echo "No NVIDIA driver detected."
        if command -v rocm-smi &> /dev/null && rocm-smi | grep -i "amd" > /dev/null; then
            echo "Found AMD GPU"
            TORCH_INDEX_URL="https://download.pytorch.org/whl/rocm6.2"
        elif [ "$HAS_INTEL_ARC_GPU" = "1" ]; then
            echo "Found INTEL ARC GPU"
            TORCH_INDEX_URL="${PYTORCH_CUDA_PREFIX}/xpu"
        else
            echo "only cpu"
            TORCH_INDEX_URL=""
        fi
    else
        echo "Found NVIDIA driver"
        # BUGFIX: capture nvidia-smi's exit status immediately. The original
        # read $? after an intervening echo, so it always saw 0 and the
        # failure branch below was unreachable.
        nvidia_output=$(nvidia-smi)
        exit_code=$?
        echo "$nvidia_output"

        if [[ $exit_code -ne 0 ]]; then
            echo "exit_code $exit_code"
        else
            cuda_version=$(echo "$nvidia_output" | grep "CUDA Version" | sed 's/.*CUDA Version: //; s/ .*//')
            if [ -n "$cuda_version" ]; then
                echo "Got CUDA Ver: $cuda_version"
            else
                echo "no CUDA ver"
            fi
        fi

        if [ -z "$cuda_version" ]; then
            # Driver present but version unreadable: fall back to the newest
            # CUDA index and pin a known-good torch version.
            echo "Error: Unable to extract CUDA version"
            TORCH_INDEX_URL="${PYTORCH_CUDA_PREFIX}/cu128"
            export TMP_TORCH_VERSION="==2.7.1"
            if [ -n "$INSTALL_TORCH_VERSION" ]; then
                export TMP_TORCH_VERSION="==$INSTALL_TORCH_VERSION"
            fi
        else
            # Map the driver-reported CUDA version to the newest compatible
            # wheel index. NOTE(review): the original comment mentioned torch
            # 2.8.0 support for CUDA >= 12.6, but the pin stays at 2.7.1 —
            # confirm the intended version before bumping.
            case "$cuda_version" in
                12.8|12.9|13.*)
                    TORCH_INDEX_URL="${PYTORCH_CUDA_PREFIX}/cu128"
                    export TMP_TORCH_VERSION="==2.7.1"
                    ;;
                12.6|12.7)
                    TORCH_INDEX_URL="${PYTORCH_CUDA_PREFIX}/cu126"
                    export TMP_TORCH_VERSION="==2.7.1"
                    ;;
                12.4|12.5)
                    TORCH_INDEX_URL="${PYTORCH_CUDA_PREFIX}/cu124"
                    ;;
                12.0|12.1|12.2|12.3)
                    TORCH_INDEX_URL="${PYTORCH_CUDA_PREFIX}/cu121"
                    ;;
                *)
                    # Unknown/newer version: default to the latest CUDA index.
                    TORCH_INDEX_URL="${PYTORCH_CUDA_PREFIX}/cu128"
                    ;;
            esac

            if [ -n "$INSTALL_TORCH_VERSION" ]; then
                export TMP_TORCH_VERSION="==$INSTALL_TORCH_VERSION"
                TORCH_INDEX_URL="${PYTORCH_CUDA_PREFIX}/${INSTALL_TORCH_INDEX_PATH}"
                echo "the INSTALL_TORCH_VERSION is $INSTALL_TORCH_VERSION, should use index //.../${INSTALL_TORCH_INDEX_PATH}"
            fi
            export ARCH_AND_GPU_INFO="nvidia_gpu"
        fi
    fi

    if [ -z "$TORCH_INDEX_URL" ]; then
        INDEX_PARAM=""
        echo "Do not set index url."
    else
        if [ "$INDEX_TYPE" = "page" ]; then
            INDEX_PARAM="-f $TORCH_INDEX_URL"
        else
            INDEX_PARAM="--index-url $TORCH_INDEX_URL"
        fi
    fi

    echo "Start install torch from: $TORCH_INDEX_URL"
    echo "INDEX_PARAM: $INDEX_PARAM"

    # shellcheck disable=SC2086 -- INDEX_PARAM must word-split into flag + URL
    conda_run_pip_install "torch${TMP_TORCH_VERSION}" torchvision torchaudio $INDEX_PARAM
}

