name: Model Evaluation

on:
  workflow_dispatch:
  schedule:
    - cron: '0 0 * * *' # Run daily at midnight UTC
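
# A single job checks out the repository, runs the evaluation script,
# and commits the results back to the repository.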
jobs:
  evaluate:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2

      - name: Set up Python
        uses: actions/setup-python@v2
        with:
          python-version: '3.9'
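
      # requirements.txt at the repository root is assumed to list the
      # dependencies needed by the evaluation script.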
      - name: Install dependencies
        run: |
          pip install -r requirements.txt
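
      # HF_TOKEN comes from the repository secrets; the evaluation script
      # presumably uses it to authenticate with the Hugging Face Hub.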
      - name: Run evaluation
        env:
          HF_TOKEN: ${{ secrets.HF_TOKEN }}
        run: |
          python scripts/run_evaluation.py
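
      # Commit any updated files under benchmark_results/; `|| exit 0` ends
      # the step cleanly when there is nothing to commit, so the push only
      # happens when the results changed.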
      - name: Commit and push results
        run: |
          git config --local user.email "action@github.com"
          git config --local user.name "GitHub Action"
          git add benchmark_results/*
          git commit -m "Update evaluation results" || exit 0
          git push