@echo off
REM ComfyUI BuimenLabo installation script: checks Python, installs Python
REM dependencies, and optionally builds llama-cpp-python for the user's hardware.
REM Keep environment changes (e.g. CMAKE_ARGS set later) local to this script
REM instead of leaking into the calling console session.
setlocal
REM Switch the console code page to UTF-8 so the emoji in the messages render.
chcp 65001 >nul
echo ============================================
echo 🚀 ComfyUI BuimenLabo - Installation Script
echo 📝 Blog: https://note.com/hirodream44
echo ============================================
echo.

echo [1/4] Checking Python environment...
REM Probe for Python on PATH; output is discarded, only the exit code matters.
python --version >nul 2>&1
if %errorlevel% neq 0 (
    echo ❌ ERROR: Python not found. Please install Python 3.8+ first.
    pause
    exit /b 1
)
python -c "import sys; print(f'✅ Python {sys.version.split()[0]} found')"

echo.
echo [2/4] Installing core dependencies...
pip install -q requests>=2.28.0 numpy>=1.21.0 Pillow>=9.0.0 psutil>=5.9.0
if %errorlevel% neq 0 (
    echo ⚠️ WARNING: Some core dependencies failed to install
    echo This may be normal if they're already installed
)
echo ✅ Core dependencies processed

echo.
echo [3/4] Installing Gemini API support...
pip install -q google-generativeai>=0.3.0
if %errorlevel% neq 0 (
    echo ⚠️ WARNING: Failed to install google-generativeai
    echo You can install it manually: pip install google-generativeai
) else (
    echo ✅ Gemini API support installed
)

echo.
echo [4/4] Llama-CPP Python Setup...
echo.
echo 🤖 Choose your hardware setup:
echo   1. 🖥️  CPU only (universal, slower)
echo   2. 🎮 NVIDIA GPU (CUDA) - recommended for RTX cards
echo   3. 🍎 Apple Silicon (Metal) - for M1/M2/M3 Macs
echo   4. 🔴 AMD GPU (ROCm) - experimental
echo   5. ⏭️  Skip (install manually later)
echo.
set /p choice="Enter your choice (1-5): "

if "%choice%"=="1" (
    echo 🖥️ Installing CPU version...
    pip install llama-cpp-python
    if %errorlevel% neq 0 echo ❌ Installation failed
) else if "%choice%"=="2" (
    echo 🎮 Installing NVIDIA CUDA version...
    echo ⏳ This may take 5-10 minutes...
    set CMAKE_ARGS=-DGGML_CUDA=on
    pip install llama-cpp-python --force-reinstall --no-cache-dir --verbose
    if %errorlevel% neq 0 echo ❌ CUDA installation failed
) else if "%choice%"=="3" (
    echo 🍎 Installing Apple Metal version...
    set CMAKE_ARGS=-DGGML_METAL=on
    pip install llama-cpp-python --force-reinstall --no-cache-dir --verbose
    if %errorlevel% neq 0 echo ❌ Metal installation failed
) else if "%choice%"=="4" (
    echo 🔴 Installing AMD ROCm version...
    set CMAKE_ARGS=-DGGML_ROCM=on
    pip install llama-cpp-python --force-reinstall --no-cache-dir --verbose
    if %errorlevel% neq 0 echo ❌ ROCm installation failed
) else if "%choice%"=="5" (
    echo ⏭️ Skipping llama-cpp-python
) else (
    echo ❌ Invalid choice. Skipping llama-cpp-python.
)

echo.
echo ============================================
echo ✅ Installation completed successfully!
echo ============================================
echo.
echo 📋 Next steps:
echo   1. 🔄 Restart ComfyUI completely
echo   2. 🔍 Look for "BuimenLabo" nodes in Add Node menu
echo   3. 🌐 Check for language toggle button (🌐) in top-right corner
echo.
echo 🔧 Setup guides:
echo   📸 Gemini Pose Analyzer:
echo     • Get API key: https://makersuite.google.com/app/apikey
echo   
echo   🤖 LLM Text Generation:
echo     • Download GGUF models from HuggingFace
echo     • Place in: ComfyUI/models/llm/ (auto-created)
echo     • Recommended: Qwen2.5-7B-Instruct-Q8_0.gguf
echo   
echo   🌍 Multi-language support: 20 languages available
echo   🎛️ ControlNet Loader: Load multiple ControlNets easily
echo   🌐 Prompt Translator: Google/DeepL translation support
echo.
echo 🆘 Troubleshooting:
echo   • Language toggle missing → Refresh browser (F5)
echo   • VRAM issues → Lower GPU layers or use smaller models
echo   • API errors → Check internet connection and API keys
echo   • Installation issues → Run as administrator
echo.
echo 📝 Blog: https://note.com/hirodream44
echo 💬 Support: Check ComfyUI Manager for updates
echo.
pause