|
#!/bin/bash
#
# Patches a local 'transformers' installation so MythWorxAI/Echo-mini models
# can be loaded without trust_remote_code=True, by installing the model's
# architecture files into the library's models/ package directory.
#
# Requirements: python3 (or python), pip3 (or pip), curl.
# Usage: bash this_script.sh

# Fail fast: abort on any command error (-e), on use of unset variables (-u),
# and on a failure anywhere in a pipeline (pipefail) — stricter than bare -e.
set -euo pipefail
|
|
|
|
|
|
# command_exists NAME
# Succeeds (exit 0) when NAME resolves to a runnable command — an executable
# on PATH, a shell builtin, or a function — in the current shell.
command_exists() {
  command -v "$1" &>/dev/null
}
|
|
|
|
|
|
echo "▶ Checking for required tools (python, pip, curl)..."

if ! command_exists python3 && ! command_exists python; then
    echo "❌ Error: Python is not installed. Please install Python 3 and try again." >&2
    exit 1
fi

# Prefer the explicitly versioned interpreter name; fall back to the bare one.
# (An if/else replaces the `cmd && echo a || echo b` one-liner, which is a
# known anti-pattern: the || branch also fires if the first echo ever fails.)
if command_exists python3; then
    PYTHON_CMD="python3"
else
    PYTHON_CMD="python"
fi

if ! command_exists pip3 && ! command_exists pip; then
    echo "❌ Error: pip is not installed. Please install pip for Python 3 and try again." >&2
    exit 1
fi

if command_exists pip3; then
    PIP_CMD="pip3"
else
    PIP_CMD="pip"
fi

if ! command_exists curl; then
    echo "❌ Error: curl is not installed. Please install curl and try again." >&2
    exit 1
fi
|
|
echo "✅ All tools are available."

echo -e "\n▶ Installing Hugging Face libraries (transformers, torch, accelerate)..."
"$PIP_CMD" install transformers torch accelerate --quiet
echo "✅ Libraries installed successfully."

echo -e "\n▶ Locating transformers installation..."
# Ask the interpreter itself where transformers lives, so the patch targets
# exactly the installation that will be used at import time.
TRANSFORMERS_PATH=$("$PYTHON_CMD" -c "import transformers, os; print(os.path.dirname(transformers.__file__))")

if [ -z "$TRANSFORMERS_PATH" ]; then
    echo "❌ Error: Could not find the transformers library installation path." >&2
    exit 1
fi

echo "✅ Found transformers at: $TRANSFORMERS_PATH"
|
|
|
|
# Target directory for the new model package inside the installed library.
MODEL_PATH="$TRANSFORMERS_PATH/models/echo"

# Idempotency: if a previous run already created the patch directory, report
# success and stop instead of re-downloading anything.
if [ -d "$MODEL_PATH" ]; then
    echo "✅ Patch directory '$MODEL_PATH' already exists. No action needed."
    echo -e "\n🎉 Patching complete! You can now use 'Echo' models."
    exit 0
fi
|
|
|
|
echo -e "\n▶ Applying patch: Creating 'echo' model directory..."
mkdir -p "$MODEL_PATH"

echo "▶ Downloading model architecture files..."
# SECURITY NOTE: this fetches executable Python straight into the installed
# transformers package — only run this if you trust the MythWorxAI repository.
CONFIG_URL="https://huggingface.co/MythWorxAI/Echo-mini/raw/main/configuration_echo.py"
MODELING_URL="https://huggingface.co/MythWorxAI/Echo-mini/raw/main/modeling_echo.py"

# Roll back the half-created package if either download fails; otherwise the
# idempotency check above would wrongly report success on the next run.
if ! curl -fL "$CONFIG_URL" -o "$MODEL_PATH/configuration_echo.py" \
   || ! curl -fL "$MODELING_URL" -o "$MODEL_PATH/modeling_echo.py"; then
    rm -rf -- "${MODEL_PATH:?}"
    echo "❌ Error: Failed to download model files. Patch rolled back." >&2
    exit 1
fi

echo "✅ Model files downloaded."
|
|
|
|
echo "▶ Finalizing module structure..."

# An (empty) __init__.py makes transformers.models.echo importable as a
# Python package.
touch "$MODEL_PATH/__init__.py"
echo "✅ Module created."

echo -e "\n🎉 Patching complete! The 'transformers' library now natively supports 'echo' models."
echo "   You can now load 'MythWorxAI/Echo-mini' without 'trust_remote_code=True'."
|
|
|
|
# Print a copy-pasteable Python snippet the user can run to verify the patch.
echo -e "\n🧪 To test the installation, run the following Python code:"
echo
echo "from transformers import AutoTokenizer, AutoModelForCausalLM"
echo
echo "model_id = 'MythWorxAI/Echo-mini'"
echo "print(f\"Loading model: {model_id}\")"
echo
echo "# This now works without trust_remote_code=True"
echo "tokenizer = AutoTokenizer.from_pretrained(model_id)"
echo "model = AutoModelForCausalLM.from_pretrained(model_id)"
echo
echo "print('✅ Model and tokenizer loaded successfully!')"
echo "print(model.config)"
|
|
|
|
|