#!/bin/bash

source "${LMD_BASE_INSTALL_SCRIPT_DIR}/global/conda_run.sh"

#######################################
# Resolve the pinned wheel version for a torch package on a given CUDA tag.
# Arguments:
#   $1 - package name: torch | torchvision | torchaudio
#   $2 - CUDA tag: cu121 | cu124 | cu126 | cu128
# Outputs:
#   version string (e.g. "2.8.0") on stdout
# Returns:
#   0 on success, 1 for an unknown package or unsupported CUDA tag.
#   Uses `return` (not `exit`): callers invoke this inside $(...) and must be
#   able to detect failure without killing the whole script.
#######################################
get_torch_cuda_whl_version() {
    local package=$1
    local cuda=$2
    case $cuda in
        # All supported CUDA tags currently pin the same release set, so a
        # single arm covers them; split it again if the pins ever diverge.
        cu121|cu124|cu126|cu128)
            case $package in
                torch)       echo "2.8.0" ;;
                torchvision) echo "0.23.1" ;;
                torchaudio)  echo "2.8.0" ;;
                *)
                    echo "Error: unknown package '$package'" >&2
                    return 1
                    ;;
            esac
            ;;
        *)
            echo "Error: unsupported CUDA tag '$cuda'" >&2
            return 1
            ;;
    esac
}

# Mirror used to fetch prebuilt PyTorch CUDA wheels (Aliyun mirror of the
# official pytorch wheel hosting); exported so child processes can read it.
export TORCH_CUDA_URL_PREFIX="https://mirrors.aliyun.com/pytorch-wheels/"

# gen whl file name
#######################################
# Build the full download URL for a prebuilt CUDA wheel.
# Globals:
#   TORCH_CUDA_URL_PREFIX (read) - base mirror URL
#   PYTHON_VER_ENV        (read) - python version, e.g. "3.10"/"3.11"/"3.12"
# Arguments:
#   $1 - package: torch/torchvision/torchaudio
#   $2 - CUDA tag: cu121/cu124/cu126/cu128
# Outputs:
#   full wheel URL on stdout, e.g.
#   .../cu124/torch-2.8.0+cu124-cp310-cp310-win_amd64.whl
# Returns:
#   0 on success, 1 if the package/CUDA combination has no pinned version
#   (instead of silently emitting a malformed URL with an empty version).
#######################################
gen_torch_cuda_whl_name() {
    local package=$1
    local cuda=$2
    local pyver=$PYTHON_VER_ENV

    # "3.10" -> "cp310" (wheel python/ABI tag)
    local pymaj=${pyver%.*}
    local pymin=${pyver#*.}
    local pycp="cp${pymaj}${pymin}"

    # Pinned package version; declaration split from assignment so the
    # helper's exit status is not masked by `local`.
    local pkgver
    pkgver=$(get_torch_cuda_whl_version "$package" "$cuda") || return 1

    echo "${TORCH_CUDA_URL_PREFIX}${cuda}/${package}-${pkgver}+${cuda}-${pycp}-${pycp}-win_amd64.whl"
}

#######################################
# Install torch/torchvision/torchaudio from pinned mirror wheels.
# Globals:
#   TORCH_INDEX_VERSION (written) - chosen CUDA wheel tag (kept global to
#                                   preserve the original side effect)
#   cuda_version        (read)    - fallback when no argument is given
# Arguments:
#   $1 - CUDA toolkit version, e.g. "12.4" (optional; falls back to the
#        global $cuda_version exported by install_torch, then to cu128)
# Returns:
#   non-zero if a wheel URL cannot be generated
#######################################
install_torch_cuda() {
    # Prefer the explicit argument (the caller already passes $cuda_version);
    # keep reading the global for backward compatibility.
    local cuda_ver=${1:-$cuda_version}

    # Map the toolkit version to the closest supported wheel CUDA tag.
    TORCH_INDEX_VERSION="cu128"
    if [ -z "$cuda_ver" ]; then
        echo "Error: Unable to extract CUDA version number from nvcc output." >&2
    else
        case "$cuda_ver" in
            12.8|12.9|13.*)      TORCH_INDEX_VERSION="cu128" ;;
            12.6|12.7)           TORCH_INDEX_VERSION="cu126" ;;
            12.4|12.5)           TORCH_INDEX_VERSION="cu124" ;;
            12.0|12.1|12.2|12.3) TORCH_INDEX_VERSION="cu121" ;;
            *)                   TORCH_INDEX_VERSION="cu128" ;;  # unknown/newer: assume latest
        esac
    fi

    local torch_whl torchaudio_whl torchvision_whl
    torch_whl=$(gen_torch_cuda_whl_name torch "$TORCH_INDEX_VERSION") || return 1
    torchaudio_whl=$(gen_torch_cuda_whl_name torchaudio "$TORCH_INDEX_VERSION") || return 1
    torchvision_whl=$(gen_torch_cuda_whl_name torchvision "$TORCH_INDEX_VERSION") || return 1

    echo "$torch_whl"
    echo "$torchaudio_whl"
    echo "$torchvision_whl"

    conda_run_pip_install "$torch_whl" "$torchaudio_whl" "$torchvision_whl"
}


#######################################
# Detect the available accelerator (NVIDIA / AMD ROCm / CPU-only) and install
# torch, torchvision and torchaudio from the matching PyTorch wheel index.
# Globals:
#   GITHUB_PROXY (read)    - when non-empty, delegates to install_torch_cuda
#                            which uses pinned mirror wheels instead
#   cuda_version (written) - exported; parsed from `nvcc --version`
#   INDEX_PARAM  (written) - pip index-url argument (global, not local)
#######################################
install_torch() {
    # Default: CPU nightly index; overwritten below when a GPU stack is found.
    local TORCH_INDEX_URL="https://download.pytorch.org/whl/nightly/cpu"
    if ! command -v nvidia-smi &> /dev/null; then
        echo "No NVIDIA driver detected."
        # No NVIDIA driver: probe for an AMD GPU via ROCm tooling.
        if command -v rocm-smi &> /dev/null && rocm-smi | grep -i "amd" > /dev/null; then
            echo "Found AMD GPU"
            TORCH_INDEX_URL=https://download.pytorch.org/whl/rocm6.2
        else
            echo "only cpu"
            # Empty URL means "use pip's default index" (see INDEX_PARAM below).
            TORCH_INDEX_URL=""
        fi
    else
        if ! command -v nvcc &> /dev/null; then
            # Driver present but no toolkit: falls through with the CPU
            # nightly index still set.
            echo "Error: The nvcc command was not found. Make sure you have the CUDA Toolkit installed."
        else
            echo "Found NVIDIA drivers and cuda"
            nvcc_output=$(nvcc --version)
            if [[ $? -ne 0 ]]; then
                echo "Error: Failed to execute 'nvcc --version'." >&2
                # exit 1
            fi

            # e.g. "Cuda compilation tools, release 12.4, V12.4.131"
            cuda_line=$(echo "$nvcc_output" | grep 'Cuda compilation tools')

            if [[ $cuda_line =~ release\ ([0-9]+\.[0-9]+) ]]; then
                # Exported so install_torch_cuda (and children) can read it.
                export cuda_version=${BASH_REMATCH[1]}
                echo "CUDA Version: $cuda_version"
            else
                echo "Error: Unable to parse CUDA version from output." >&2
            fi

            if [ -n "$GITHUB_PROXY" ]; then
                # if the proxy exists. use fixed version.
                install_torch_cuda $cuda_version
                return
            else
                # install torch with index-url
                if [ -z "$cuda_version" ]; then
                    echo "Error: Unable to extract CUDA version number from nvcc output."
                    TORCH_INDEX_URL="https://download.pytorch.org/whl/cu126"
                else
                    # Map toolkit version to the newest wheel index it supports.
                    if [[ "$cuda_version" == "12.8" || "$cuda_version" == "12.9" || "$cuda_version" == "13.0" || "$cuda_version" =~ ^13\.[1-9] ]]; then
                        TORCH_INDEX_URL="https://download.pytorch.org/whl/cu128"
                    elif [[ "$cuda_version" == "12.6" || "$cuda_version" == "12.7" ]]; then
                        TORCH_INDEX_URL="https://download.pytorch.org/whl/cu126"
                    elif [[ "$cuda_version" == "12.4" || "$cuda_version" == "12.5" ]]; then
                        TORCH_INDEX_URL="https://download.pytorch.org/whl/cu124"
                    elif [[ "$cuda_version" == "12.0" || "$cuda_version" == "12.1" || "$cuda_version" == "12.2" || "$cuda_version" == "12.3" ]]; then
                        TORCH_INDEX_URL="https://download.pytorch.org/whl/cu121"
                    else
                        # NOTE(review): this fallback (cu121) differs from
                        # install_torch_cuda's fallback (cu128) — confirm
                        # which default is intended for unknown versions.
                        TORCH_INDEX_URL="https://download.pytorch.org/whl/cu121"
                    fi
                fi
            fi
        fi
    fi
    

    if [ -z "$TORCH_INDEX_URL" ]; then
        INDEX_PARAM=""
        echo "Do not set index url."
    else
        # INDEX_PARAM is intentionally expanded UNQUOTED at the call site so
        # it word-splits into two arguments: --index-url <url>.
        INDEX_PARAM="--index-url $TORCH_INDEX_URL"
    fi

    echo "Start install torch from: $TORCH_INDEX_URL"
    echo "INDEX_PARAM: $INDEX_PARAM"
    conda_run_pip_install torch==2.8.0 torchvision torchaudio $INDEX_PARAM
}

# Entry point: detect the accelerator and install the matching torch build.
install_torch
