text stringlengths 1 262k | target_text stringlengths 1 3.1k | context_documents listlengths 0 8 | repo stringlengths 7 93 | path stringlengths 5 228 |
|---|---|---|---|---|
"""Unit tests for LogPublisher.
Tests log publishing to Redis Pub/Sub, history storage, and retrieval.
"""
import json
from unittest.mock import Mock, patch, AsyncMock
import pytest
from lazy_bird.services.log_publisher import (
LogPublisher,
publish_task_log,
)
class TestLogPublisherInit:
"""Test LogPublisher initialization."""
def test_init_sync(self):
"""Test LogPublisher with sync client."""
publisher = LogPublisher(use_async=False)
assert publisher.use_async is False
assert publisher._redis is None
def test_init_async(self):
"""Test LogPublisher with async client."""
publisher = LogPublisher(use_async=True)
assert publisher.use_async is True
assert publisher._redis is None
class TestGetChannel:
"""Test _get_channel method."""
def test_get_channel_task_specific(self):
"""Test channel for task-specific logs."""
publisher = LogPublisher()
channel = publisher._get_channel(task_id="task-123")
assert channel == "lazy_bird:logs:task:task-123"
def test_get_channel_project_specific(self):
"""Test channel for project-specific logs."""
publisher = LogPublisher()
channel = publisher._get_channel(project_id="my-project")
assert channel == "lazy_bird:logs:project:my-project"
def test_get_channel_system(self):
"""Test channel for system logs."""
publisher = LogPublisher()
channel = publisher._get_channel()
assert channel == "lazy_bird:logs:system"
def test_get_channel_priority(self):
"""Test task_id takes priority over project_id."""
publisher = LogPublisher()
channel = publisher._get_channel(task_id="task-123", project_id="my-project")
assert channel == "lazy_bird:logs:task:task-123"
class TestBuildLogEntry:
"""Test _build_log_entry method."""
def test_build_log_entry_minimal(self):
"""Test building minimal log entry."""
publisher = LogPublisher()
entry = publisher._build_log_entry(
message="Test message",
level="INFO",
task_id=None,
project_id=None,
metadata=None,
)
assert entry["message"] == "Test message"
assert entry["level"] == "INFO"
assert "timestamp" in entry
assert "task_id" not in entry
assert "project_id" not in entry
def test_build_log_entry_with_ids(self):
"""Test building log entry with task and project IDs."""
publisher = LogPublisher()
entry = publisher._build_log_entry(
message="Test message",
level="DEBUG",
task_id="task-123",
project_id="my-project",
metadata=None,
)
assert entry["task_id"] == "task-123"
assert entry["project_id"] == "my-project"
def test_build_log_entry_with_metadata(self):
"""Test building log entry with metadata."""
publisher = LogPublisher()
metadata = {"test_framework": "pytest", "tests_passed": 10}
entry = publisher._build_log_entry(
message="Tests completed",
level="INFO",
task_id=None,
project_id=None,
metadata=metadata,
)
assert entry["metadata"] == metadata
class TestPublishLog:
"""Test publish_log method (synchronous)."""
def test_publish_log_success(self):
"""Test successful log publishing."""
publisher = LogPublisher()
with patch("lazy_bird.services.log_publisher.get_redis") as mock_get_redis:
mock_redis = Mock()
mock_redis.publish.return_value = 1 # 1 subscriber
mock_get_redis.return_value = mock_redis
result = publisher.publish_log(
message="Test message",
level="INFO",
task_id="task-123",
)
assert result is True
assert mock_redis.publish.called
assert mock_redis.lpush.called # History storage
# Check channel name
call_args = mock_redis.publish.call_args[0]
assert call_args[0] == "lazy_bird:logs:task:task-123"
# Check message is JSON
log_json = call_args[1]
log_data = json.loads(log_json)
assert log_data["message"] == "Test message"
assert log_data["level"] == "INFO"
def test_publish_log_with_metadata(self):
"""Test publishing log with metadata."""
publisher = LogPublisher()
with patch("lazy_bird.services.log_publisher.get_redis") as mock_get_redis:
mock_redis = Mock()
mock_redis.publish.return_value = 1
mock_get_redis.return_value = mock_redis
metadata = {"test": "value"}
result = publisher.publish_log(
message="Test",
level="INFO",
task_id="task-123",
metadata=metadata,
)
assert result is True
# Check metadata is included
call_args = mock_redis.publish.call_args[0]
log_data = json.loads(call_args[1])
assert log_data["metadata"] == metadata
def test_publish_log_connection_error(self):
"""Test publishing with Redis connection error."""
publisher = LogPublisher()
with patch("lazy_bird.services.log_publisher.get_redis") as mock_get_redis:
from redis.exceptions import ConnectionError
mock_get_redis.side_effect = ConnectionError("Connection failed")
result = publisher.publish_log(
message="Test",
level="INFO",
task_id="task-123",
)
assert result is False
@pytest.mark.asyncio
class TestPublishLogAsync:
"""Test publish_log_async method (asynchronous)."""
async def test_publish_log_async_success(self):
"""Test successful async log publishing."""
publisher = LogPublisher(use_async=True)
with patch("lazy_bird.services.log_publisher.get_async_redis") as mock_get_redis:
mock_redis = AsyncMock()
mock_redis.publish.return_value = 1 # 1 subscriber
mock_get_redis.return_value = mock_redis
result = await publisher.publish_log_async(
message="Test message",
level="INFO",
task_id="task-123",
)
assert result is True
assert mock_redis.publish.called
assert mock_redis.lpush.called # History storage
async def test_publish_log_async_connection_error(self):
"""Test async publishing with connection error."""
publisher = LogPublisher(use_async=True)
with patch("lazy_bird.services.log_publisher.get_async_redis") as mock_get_redis:
from redis.exceptions import ConnectionError
mock_get_redis.side_effect = ConnectionError("Connection failed")
result = await publisher.publish_log_async(
message="Test",
level="INFO",
task_id="task-123",
)
assert result is False
class TestGetLogHistory:
"""Test get_log_history method (synchronous)."""
def test_get_log_history_success(self):
"""Test retrieving log history."""
publisher = LogPublisher()
with patch("lazy_bird.services.log_publisher.get_redis") as mock_get_redis:
mock_redis = Mock()
log_entries = [
json.dumps({"message": "Log 1", "level": "INFO"}),
json.dumps({"message": "Log 2", "level": "DEBUG"}),
]
mock_redis.lrange.return_value = log_entries
mock_get_redis.return_value = mock_redis
history = publisher.get_log_history(task_id="task-123", limit=10)
assert len(history) == 2
assert history[0]["message"] == "Log 1"
assert history[1]["message"] == "Log 2"
def test_get_log_history_empty(self):
"""Test retrieving empty log history."""
publisher = LogPublisher()
with patch("lazy_bird.services.log_publisher.get_redis") as mock_get_redis:
mock_redis = Mock()
mock_redis.lrange.return_value = []
mock_get_redis.return_value = mock_redis
history = publisher.get_log_history(task_id="task-123")
assert history == []
def test_get_log_history_connection_error(self):
"""Test get history with connection error."""
publisher = LogPublisher()
with patch("lazy_bird.services.log_publisher.get_redis") as mock_get_redis:
from redis.exceptions import ConnectionError
mock_get_redis.side_effect = ConnectionError("Connection failed")
history = publisher.get_log_history(task_id="task-123")
assert history == []
@pytest.mark.asyncio
class TestGetLogHistoryAsync:
"""Test get_log_history_async method (asynchronous)."""
async def test_get_log_history_async_success(self):
"""Test async log history retrieval."""
publisher = LogPublisher(use_async=True)
with patch("lazy_bird.services.log_publisher.get_async_redis") as mock_get_redis:
mock_redis = AsyncMock()
log_entries = [
json.dumps({"message": "Log 1", "level": "INFO"}),
json.dumps({"message": "Log 2", "level": "DEBUG"}),
]
mock_redis.lrange.return_value = log_entries
mock_get_redis.return_value = mock_redis
history = await publisher.get_log_history_async(task_id="task-123")
assert len(history) == 2
assert history[0]["message"] == "Log 1"
class TestPublishTaskLog:
"""Test publish_task_log convenience function."""
def test_publish_task_log_success(self):
"""Test convenience function for publishing task logs."""
with patch("lazy_bird.services.log_publisher.get_redis") as mock_get_redis:
mock_redis = Mock()
mock_redis.publish.return_value = 1
mock_get_redis.return_value = mock_redis
result = publish_task_log(
message="Test task log",
task_id="task-123",
level="INFO",
test_framework="pytest",
)
assert result is True
assert mock_redis.publish.called
# Check metadata was passed
call_args = mock_redis.publish.call_args[0]
log_data = json.loads(call_args[1])
assert log_data["metadata"] == {"test_framework": "pytest"}
| """Unit tests for LogPublisher.
Tests log publishing to Redis Pub/Sub, history storage, and retrieval.
"""
import json
from unittest.mock import Mock, patch, AsyncMock
import pytest
from lazy_bird.services.log_publisher import (
LogPublisher,
publish_task_log,
)
class TestLogPublisherInit:
"""Test LogPublisher initialization."""
def test_init_sync(self):
"""Test LogPublisher with sync client."""
publisher = LogPublisher(use_async=False)
assert publisher.use_async is False
assert publisher._redis is None
def test_init_async(self):
"""Test LogPublisher with async client."""
publisher = LogPublisher(use_async=True)
assert publisher.use_async is True
assert publisher._redis is None
class TestGetChannel:
"""Test _get_channel method."""
def test_get_channel_task_specific(self):
"""Test channel for task-specific logs."""
publisher = LogPublisher()
channel = publisher._get_channel(task_id="task-123")
assert channel == "lazy_bird:logs:task:task-123"
def test_get_channel_project_specific(self):
"""Test channel for project-specific logs."""
publisher = LogPublisher()
channel = publisher._get_channel(project_id="my-project")
assert channel == "lazy_bird:logs:project:my-project"
def test_get_channel_system(self):
""" | [
"# yusufkaraaslan/lazy-bird:lazy_bird/services/log_publisher.py\nLogPublisher"
] | yusufkaraaslan/lazy-bird | tests/unit/test_log_publisher.py |
"""Tests for issue watcher Celery task.
Tests for lazy_bird/tasks/issue_watcher.py:
- Finds ready issues and creates TaskRuns
- Skips already-processed issues (existing TaskRun)
- Handles API errors gracefully
- Skips inactive projects
"""
import uuid
from unittest.mock import AsyncMock, MagicMock, patch
import pytest
def _make_scalars_result(items):
"""Return a mock result whose .scalars().all() returns items."""
result = AsyncMock()
result.scalars = MagicMock(return_value=MagicMock(all=MagicMock(return_value=items)))
return result
def _make_scalar_one_result(value):
"""Return a mock result whose .scalar_one_or_none() returns value."""
result = AsyncMock()
result.scalar_one_or_none = MagicMock(return_value=value)
return result
class TestParseGithubRepo:
"""Test GitHub repo URL parsing."""
def test_parse_https_url(self):
from lazy_bird.tasks.issue_watcher import _parse_github_repo
result = _parse_github_repo("https://github.com/owner/repo")
assert result == ("owner", "repo")
def test_parse_https_url_with_git(self):
from lazy_bird.tasks.issue_watcher import _parse_github_repo
result = _parse_github_repo("https://github.com/owner/repo.git")
assert result == ("owner", "repo")
def test_parse_ssh_url(self):
from lazy_bird.tasks.issue_watcher import _parse_github_repo
result = _parse_github_repo("git@github.com:owner/repo.git")
assert result == ("owner", "repo")
def test_parse_invalid_url(self):
from lazy_bird.tasks.issue_watcher import _parse_github_repo
result = _parse_github_repo("https://gitlab.com/owner/repo")
assert result is None
class TestParseGitlabRepo:
"""Test GitLab repo URL parsing."""
def test_parse_simple_url(self):
from lazy_bird.tasks.issue_watcher import _parse_gitlab_repo
result = _parse_gitlab_repo("https://gitlab.com/owner/repo")
assert result == "owner%2Frepo"
def test_parse_url_with_git(self):
from lazy_bird.tasks.issue_watcher import _parse_gitlab_repo
result = _parse_gitlab_repo("https://gitlab.com/group/subgroup/repo.git")
assert result == "group%2Fsubgroup%2Frepo"
class TestDetectPlatform:
"""Test platform detection logic."""
def test_detect_from_source_platform(self):
from lazy_bird.tasks.issue_watcher import _detect_platform
project = MagicMock()
project.source_platform = "github"
project.repo_url = "https://example.com/repo"
assert _detect_platform(project) == "github"
def test_detect_from_repo_url_github(self):
from lazy_bird.tasks.issue_watcher import _detect_platform
project = MagicMock()
project.source_platform = None
project.repo_url = "https://github.com/owner/repo"
assert _detect_platform(project) == "github"
def test_detect_from_repo_url_gitlab(self):
from lazy_bird.tasks.issue_watcher import _detect_platform
project = MagicMock()
project.source_platform = None
project.repo_url = "https://gitlab.com/owner/repo"
assert _detect_platform(project) == "gitlab"
def test_detect_unknown(self):
from lazy_bird.tasks.issue_watcher import _detect_platform
project = MagicMock()
project.source_platform = None
project.repo_url = "https://bitbucket.org/owner/repo"
assert _detect_platform(project) is None
class TestWatchIssuesGithub:
"""Test watch_issues with GitHub projects."""
@pytest.mark.asyncio
async def test_finds_ready_issues_and_creates_task_runs(self):
"""Should create TaskRuns for new ready issues."""
from lazy_bird.tasks.issue_watcher import _watch_issues_async
project_id = uuid.uuid4()
mock_project = MagicMock()
mock_project.id = project_id
mock_project.name = "test-project"
mock_project.source_platform = "github"
mock_project.repo_url = "https://github.com/owner/repo"
mock_project.automation_enabled = True
mock_project.deleted_at = None
mock_db = AsyncMock()
call_count = [0]
async def mock_execute(stmt):
call_count[0] += 1
if call_count[0] == 1:
# Active projects query
return _make_scalars_result([mock_project])
elif call_count[0] == 2:
# Existing TaskRun check - none found
return _make_scalar_one_result(None)
return AsyncMock()
mock_db.execute = mock_execute
mock_db.add = MagicMock()
mock_db.commit = AsyncMock()
github_issues = [
{
"number": 42,
"title": "Add health system",
"body": "## Description\nImplement health tracking...",
"html_url": "https://github.com/owner/repo/issues/42",
"labels": [{"name": "ready"}],
}
]
mock_response = MagicMock()
mock_response.json.return_value = github_issues
mock_response.raise_for_status = MagicMock()
mock_client = AsyncMock()
mock_client.get = AsyncMock(return_value=mock_response)
mock_client.delete = AsyncMock()
mock_client.post = AsyncMock()
with (
patch(
"lazy_bird.tasks.issue_watcher.get_async_db",
return_value=_async_gen(mock_db),
),
patch(
"lazy_bird.tasks.issue_watcher.httpx.AsyncClient",
) as mock_client_cls,
patch(
"lazy_bird.tasks.issue_watcher.settings",
) as mock_settings,
):
mock_settings.GITHUB_TOKEN = "ghp_test_token"
mock_settings.GITLAB_TOKEN = None
# Make AsyncClient context manager return our mock
mock_client_ctx = AsyncMock()
mock_client_ctx.__aenter__ = AsyncMock(return_value=mock_client)
mock_client_ctx.__aexit__ = AsyncMock(return_value=False)
mock_client_cls.return_value = mock_client_ctx
result = await _watch_issues_async()
assert result["projects_checked"] == 1
assert result["issues_found"] == 1
assert result["task_runs_created"] == 1
assert result["issues_skipped"] == 0
assert result["errors"] == []
mock_db.add.assert_called_once()
mock_db.commit.assert_called()
@pytest.mark.asyncio
async def test_skips_already_processed_issues(self):
"""Should skip issues that already have a TaskRun."""
from lazy_bird.tasks.issue_watcher import _watch_issues_async
project_id = uuid.uuid4()
mock_project = MagicMock()
mock_project.id = project_id
mock_project.name = "test-project"
mock_project.source_platform = "github"
mock_project.repo_url = "https://github.com/owner/repo"
mock_project.automation_enabled = True
mock_project.deleted_at = None
mock_db = AsyncMock()
existing_task_run = MagicMock()
existing_task_run.id = uuid.uuid4()
call_count = [0]
async def mock_execute(stmt):
call_count[0] += 1
if call_count[0] == 1:
return _make_scalars_result([mock_project])
elif call_count[0] == 2:
# Existing TaskRun found
return _make_scalar_one_result(existing_task_run)
return AsyncMock()
mock_db.execute = mock_execute
mock_db.add = MagicMock()
mock_db.commit = AsyncMock()
github_issues = [
{
"number": 42,
"title": "Add health system",
"body": "Implement health",
"html_url": "https://github.com/owner/repo/issues/42",
"labels": [{"name": "ready"}],
}
]
mock_response = MagicMock()
mock_response.json.return_value = github_issues
mock_response.raise_for_status = MagicMock()
mock_client = AsyncMock()
mock_client.get = AsyncMock(return_value=mock_response)
with (
patch(
"lazy_bird.tasks.issue_watcher.get_async_db",
return_value=_async_gen(mock_db),
),
patch(
"lazy_bird.tasks.issue_watcher.httpx.AsyncClient",
) as mock_client_cls,
patch(
"lazy_bird.tasks.issue_watcher.settings",
) as mock_settings,
):
mock_settings.GITHUB_TOKEN = "ghp_test_token"
mock_settings.GITLAB_TOKEN = None
mock_client_ctx = AsyncMock()
mock_client_ctx.__aenter__ = AsyncMock(return_value=mock_client)
mock_client_ctx.__aexit__ = AsyncMock(return_value=False)
mock_client_cls.return_value = mock_client_ctx
result = await _watch_issues_async()
assert result["issues_found"] == 1
assert result["task_runs_created"] == 0
assert result["issues_skipped"] == 1
mock_db.add.assert_not_called()
@pytest.mark.asyncio
async def test_handles_api_errors_gracefully(self):
"""Should handle API errors without crashing."""
from lazy_bird.tasks.issue_watcher import _watch_issues_async
project_id = uuid.uuid4()
mock_project = MagicMock()
mock_project.id = project_id
mock_project.name = "test-project"
mock_project.source_platform = "github"
mock_project.repo_url = "https://github.com/owner/repo"
mock_project.automation_enabled = True
mock_project.deleted_at = None
mock_db = AsyncMock()
call_count = [0]
async def mock_execute(stmt):
call_count[0] += 1
if call_count[0] == 1:
return _make_scalars_result([mock_project])
return AsyncMock()
mock_db.execute = mock_execute
# Client raises HTTP error
mock_client = AsyncMock()
mock_response = MagicMock()
mock_response.raise_for_status.side_effect = Exception("API rate limit exceeded")
mock_client.get = AsyncMock(return_value=mock_response)
with (
patch(
"lazy_bird.tasks.issue_watcher.get_async_db",
return_value=_async_gen(mock_db),
),
patch(
"lazy_bird.tasks.issue_watcher.httpx.AsyncClient",
) as mock_client_cls,
patch(
"lazy_bird.tasks.issue_watcher.settings",
) as mock_settings,
):
mock_settings.GITHUB_TOKEN = "ghp_test_token"
mock_settings.GITLAB_TOKEN = None
mock_client_ctx = AsyncMock()
mock_client_ctx.__aenter__ = AsyncMock(return_value=mock_client)
mock_client_ctx.__aexit__ = AsyncMock(return_value=False)
mock_client_cls.return_value = mock_client_ctx
result = await _watch_issues_async()
# Should not crash, should record error
assert len(result["errors"]) == 1
assert "API rate limit exceeded" in result["errors"][0]
@pytest.mark.asyncio
async def test_skips_inactive_projects(self):
"""Should return early when no active projects found."""
from lazy_bird.tasks.issue_watcher import _watch_issues_async
mock_db = AsyncMock()
mock_db.execute = AsyncMock(return_value=_make_scalars_result([]))
with patch(
"lazy_bird.tasks.issue_watcher.get_async_db",
return_value=_async_gen(mock_db),
):
result = await _watch_issues_async()
assert result["projects_checked"] == 0
assert result["issues_found"] == 0
assert result["task_runs_created"] == 0
assert result["errors"] == []
@pytest.mark.asyncio
async def test_skips_project_without_token(self):
"""Should skip projects when no token is configured."""
from lazy_bird.tasks.issue_watcher import _watch_issues_async
mock_project = MagicMock()
mock_project.id = uuid.uuid4()
mock_project.name = "test-project"
mock_project.source_platform = "github"
mock_project.repo_url = "https://github.com/owner/repo"
mock_db = AsyncMock()
mock_db.execute = AsyncMock(return_value=_make_scalars_result([mock_project]))
mock_client = AsyncMock()
with (
patch(
"lazy_bird.tasks.issue_watcher.get_async_db",
return_value=_async_gen(mock_db),
),
patch(
"lazy_bird.tasks.issue_watcher.httpx.AsyncClient",
) as mock_client_cls,
patch(
"lazy_bird.tasks.issue_watcher.settings",
) as mock_settings,
):
mock_settings.GITHUB_TOKEN = None
mock_settings.GITLAB_TOKEN = None
mock_client_ctx = AsyncMock()
mock_client_ctx.__aenter__ = AsyncMock(return_value=mock_client)
mock_client_ctx.__aexit__ = AsyncMock(return_value=False)
mock_client_cls.return_value = mock_client_ctx
result = await _watch_issues_async()
assert result["projects_checked"] == 1
assert result["issues_found"] == 0
assert result["task_runs_created"] == 0
class TestWatchIssuesGitlab:
"""Test watch_issues with GitLab projects."""
@pytest.mark.asyncio
async def test_finds_gitlab_issues_and_creates_task_runs(self):
"""Should create TaskRuns for new ready GitLab issues."""
from lazy_bird.tasks.issue_watcher import _watch_issues_async
project_id = uuid.uuid4()
mock_project = MagicMock()
mock_project.id = project_id
mock_project.name = "gitlab-project"
mock_project.source_platform = "gitlab"
mock_project.repo_url = "https://gitlab.com/owner/repo"
mock_project.automation_enabled = True
mock_project.deleted_at = None
mock_db = AsyncMock()
call_count = [0]
async def mock_execute(stmt):
call_count[0] += 1
if call_count[0] == 1:
return _make_scalars_result([mock_project])
elif call_count[0] == 2:
return _make_scalar_one_result(None)
return AsyncMock()
mock_db.execute = mock_execute
mock_db.add = MagicMock()
mock_db.commit = AsyncMock()
gitlab_issues = [
{
"iid": 10,
"title": "Fix login page",
"description": "The login page has a bug...",
"web_url": "https://gitlab.com/owner/repo/-/issues/10",
"labels": ["ready"],
}
]
mock_response = MagicMock()
mock_response.json.return_value = gitlab_issues
mock_response.raise_for_status = MagicMock()
mock_client = AsyncMock()
mock_client.get = AsyncMock(return_value=mock_response)
mock_client.put = AsyncMock()
with (
patch(
"lazy_bird.tasks.issue_watcher.get_async_db",
return_value=_async_gen(mock_db),
),
patch(
"lazy_bird.tasks.issue_watcher.httpx.AsyncClient",
) as mock_client_cls,
patch(
"lazy_bird.tasks.issue_watcher.settings",
) as mock_settings,
):
mock_settings.GITHUB_TOKEN = None
mock_settings.GITLAB_TOKEN = "glpat-test-token"
mock_client_ctx = AsyncMock()
mock_client_ctx.__aenter__ = AsyncMock(return_value=mock_client)
mock_client_ctx.__aexit__ = AsyncMock(return_value=False)
mock_client_cls.return_value = mock_client_ctx
result = await _watch_issues_async()
assert result["projects_checked"] == 1
assert result["issues_found"] == 1
assert result["task_runs_created"] == 1
assert result["issues_skipped"] == 0
assert result["errors"] == []
mock_db.add.assert_called_once()
# -------------------------------------------------------------------------
# HELPERS
# -------------------------------------------------------------------------
async def _async_gen(value):
"""Create an async generator that yields a single value."""
yield value
| """Tests for issue watcher Celery task.
Tests for lazy_bird/tasks/issue_watcher.py:
- Finds ready issues and creates TaskRuns
- Skips already-processed issues (existing TaskRun)
- Handles API errors gracefully
- Skips inactive projects
"""
import uuid
from unittest.mock import AsyncMock, MagicMock, patch
import pytest
def _make_scalars_result(items):
"""Return a mock result whose .scalars().all() returns items."""
result = AsyncMock()
result.scalars = MagicMock(return_value=MagicMock(all=MagicMock(return_value=items)))
return result
def _make_scalar_one_result(value):
"""Return a mock result whose .scalar_one_or_none() returns value."""
result = AsyncMock()
result.scalar_one_or_none = MagicMock(return_value=value)
return result
class TestParseGithubRepo:
"""Test GitHub repo URL parsing."""
def test_parse_https_url(self):
from lazy_bird.tasks.issue_watcher import _parse_github_repo
result = _parse_github_repo("https://github.com/owner/repo")
assert result == ("owner", "repo")
def test_parse_https_url_with_git(self):
from lazy_bird.tasks.issue_watcher import _parse_github_repo
result = _parse_github_repo("https://github.com/owner/repo.git")
assert result == ("owner", "repo")
def test_parse_ssh_url(self):
from lazy_bird.tasks.issue_ | [] | yusufkaraaslan/lazy-bird | tests/unit/test_issue_watcher.py |
"""
Unit tests for lazy_bird/__init__.py module
"""
import pytest
from pathlib import Path
import lazy_bird
class TestPackageMetadata:
"""Test package metadata and version information"""
def test_version_exists(self):
"""Test that __version__ is defined"""
assert hasattr(lazy_bird, "__version__")
assert isinstance(lazy_bird.__version__, str)
def test_version_format(self):
"""Test version follows semantic versioning"""
version = lazy_bird.__version__
parts = version.split(".")
assert len(parts) >= 2, "Version should have at least major.minor"
# Test that parts are numeric (or contain numeric parts)
assert parts[0].isdigit(), "Major version should be numeric"
assert parts[1].isdigit(), "Minor version should be numeric"
def test_author_exists(self):
"""Test that __author__ is defined"""
assert hasattr(lazy_bird, "__author__")
assert isinstance(lazy_bird.__author__, str)
assert len(lazy_bird.__author__) > 0
def test_license_exists(self):
"""Test that __license__ is defined"""
assert hasattr(lazy_bird, "__license__")
assert lazy_bird.__license__ == "MIT"
class TestPackagePaths:
"""Test package path constants"""
def test_package_root_exists(self):
"""Test PACKAGE_ROOT is defined and exists"""
assert hasattr(lazy_bird, "PACKAGE_ROOT")
assert isinstance(lazy_bird.PACKAGE_ROOT, Path)
assert lazy_bird.PACKAGE_ROOT.exists()
def test_package_root_is_directory(self):
"""Test PACKAGE_ROOT points to a directory"""
assert lazy_bird.PACKAGE_ROOT.is_dir()
def test_scripts_dir_defined(self):
"""Test SCRIPTS_DIR is defined"""
assert hasattr(lazy_bird, "SCRIPTS_DIR")
assert isinstance(lazy_bird.SCRIPTS_DIR, Path)
assert lazy_bird.SCRIPTS_DIR == lazy_bird.PACKAGE_ROOT / "scripts"
def test_config_dir_defined(self):
"""Test CONFIG_DIR is defined"""
assert hasattr(lazy_bird, "CONFIG_DIR")
assert isinstance(lazy_bird.CONFIG_DIR, Path)
assert lazy_bird.CONFIG_DIR == lazy_bird.PACKAGE_ROOT / "config"
def test_web_dir_defined(self):
"""Test WEB_DIR is defined"""
assert hasattr(lazy_bird, "WEB_DIR")
assert isinstance(lazy_bird.WEB_DIR, Path)
assert lazy_bird.WEB_DIR == lazy_bird.PACKAGE_ROOT / "web"
def test_docs_dir_defined(self):
"""Test DOCS_DIR is defined"""
assert hasattr(lazy_bird, "DOCS_DIR")
assert isinstance(lazy_bird.DOCS_DIR, Path)
assert lazy_bird.DOCS_DIR == lazy_bird.PACKAGE_ROOT / "Docs"
class TestPackageExports:
"""Test __all__ exports"""
def test_all_defined(self):
"""Test __all__ is defined"""
assert hasattr(lazy_bird, "__all__")
assert isinstance(lazy_bird.__all__, list)
def test_all_contains_version(self):
"""Test __all__ exports version info"""
assert "__version__" in lazy_bird.__all__
assert "__author__" in lazy_bird.__all__
assert "__license__" in lazy_bird.__all__
def test_all_contains_paths(self):
"""Test __all__ exports path constants"""
assert "PACKAGE_ROOT" in lazy_bird.__all__
assert "SCRIPTS_DIR" in lazy_bird.__all__
assert "CONFIG_DIR" in lazy_bird.__all__
assert "WEB_DIR" in lazy_bird.__all__
assert "DOCS_DIR" in lazy_bird.__all__
def test_all_items_exist(self):
"""Test all items in __all__ actually exist"""
for item in lazy_bird.__all__:
assert hasattr(lazy_bird, item), f"{item} in __all__ but not defined"
| """
Unit tests for lazy_bird/__init__.py module
"""
import pytest
from pathlib import Path
import lazy_bird
class TestPackageMetadata:
"""Test package metadata and version information"""
def test_version_exists(self):
"""Test that __version__ is defined"""
assert hasattr(lazy_bird, "__version__")
assert isinstance(lazy_bird.__version__, str)
def test_version_format(self):
"""Test version follows semantic versioning"""
version = lazy_bird.__version__
parts = version.split(".")
assert len(parts) >= 2, "Version should have at least major.minor"
# Test that parts are numeric (or contain numeric parts)
assert parts[0].isdigit(), "Major version should be numeric"
assert parts[1].isdigit(), "Minor version should be numeric"
def test_author_exists(self):
"""Test that __author__ is defined"""
assert hasattr(lazy_bird, "__author__")
assert isinstance(lazy_bird.__author__, str)
assert len(lazy_bird.__author__) > 0
def test_license_exists(self):
"""Test that __license__ is defined"""
assert hasattr(lazy_bird, "__license__")
assert lazy_bird.__license__ == "MIT"
class TestPackagePaths:
"""Test package path constants"""
def test_package_root_exists(self):
"""Test PACKAGE_ROOT is defined and exists"""
| [] | yusufkaraaslan/lazy-bird | tests/unit/test_init.py |
"""Unit tests for lazy_bird.api.routers.health module.
Tests health check, liveness, readiness, and startup probe endpoints.
"""
from unittest.mock import AsyncMock, MagicMock, Mock, patch
import pytest
from lazy_bird.api.routers.health import (
check_celery_status,
check_database,
check_redis_status,
get_system_metrics,
)
class TestCheckDatabase:
"""Test check_database helper function."""
@pytest.mark.asyncio
@patch("lazy_bird.api.routers.health.settings")
@patch("lazy_bird.api.routers.health.check_db_connection")
async def test_check_database_sync_healthy(self, mock_check, mock_settings):
"""Test sync database health check when healthy."""
mock_settings.USE_ASYNC_DB = False
mock_settings.DATABASE_URL = "postgresql://user:pass@localhost:5432/db"
mock_check.return_value = True
result = await check_database()
assert result["status"] == "healthy"
assert result["mode"] == "sync"
@pytest.mark.asyncio
@patch("lazy_bird.api.routers.health.settings")
@patch("lazy_bird.api.routers.health.check_db_connection")
async def test_check_database_sync_unhealthy(self, mock_check, mock_settings):
"""Test sync database health check when unhealthy."""
mock_settings.USE_ASYNC_DB = False
mock_settings.DATABASE_URL = "postgresql://user:pass@localhost:5432/db"
mock_check.return_value = False
result = await check_database()
assert result["status"] == "unhealthy"
@pytest.mark.asyncio
@patch("lazy_bird.api.routers.health.settings")
@patch("lazy_bird.api.routers.health.check_async_db_connection")
async def test_check_database_async_healthy(self, mock_check, mock_settings):
"""Test async database health check when healthy."""
mock_settings.USE_ASYNC_DB = True
mock_settings.DATABASE_URL = "postgresql://user:pass@localhost:5432/db"
mock_check.return_value = True
result = await check_database()
assert result["status"] == "healthy"
assert result["mode"] == "async"
@pytest.mark.asyncio
@patch("lazy_bird.api.routers.health.settings")
@patch("lazy_bird.api.routers.health.check_db_connection")
async def test_check_database_exception(self, mock_check, mock_settings):
"""Test database health check when exception occurs."""
mock_settings.USE_ASYNC_DB = False
mock_check.side_effect = Exception("Connection refused")
result = await check_database()
assert result["status"] == "unhealthy"
assert "error" in result
class TestCheckRedisStatus:
"""Test check_redis_status helper function."""
@pytest.mark.asyncio
@patch("lazy_bird.api.routers.health.settings")
@patch("lazy_bird.api.routers.health.check_redis_connection")
async def test_check_redis_sync_healthy(self, mock_check, mock_settings):
"""Test sync Redis health check when healthy."""
mock_settings.USE_ASYNC_DB = False
mock_settings.REDIS_URL = "redis://localhost:6379/0"
mock_check.return_value = True
result = await check_redis_status()
assert result["status"] == "healthy"
@pytest.mark.asyncio
@patch("lazy_bird.api.routers.health.settings")
@patch("lazy_bird.api.routers.health.check_redis_connection")
async def test_check_redis_sync_unhealthy(self, mock_check, mock_settings):
"""Test sync Redis health check when unhealthy."""
mock_settings.USE_ASYNC_DB = False
mock_settings.REDIS_URL = "redis://localhost:6379/0"
mock_check.return_value = False
result = await check_redis_status()
assert result["status"] == "unhealthy"
@pytest.mark.asyncio
@patch("lazy_bird.api.routers.health.settings")
@patch("lazy_bird.api.routers.health.check_async_redis_connection")
async def test_check_redis_async_healthy(self, mock_check, mock_settings):
"""Test async Redis health check when healthy."""
mock_settings.USE_ASYNC_DB = True
mock_settings.REDIS_URL = "redis://localhost:6379/0"
mock_check.return_value = True
result = await check_redis_status()
assert result["status"] == "healthy"
@pytest.mark.asyncio
@patch("lazy_bird.api.routers.health.settings")
async def test_check_redis_exception(self, mock_settings):
"""Test Redis health check when exception occurs."""
mock_settings.USE_ASYNC_DB = False
with patch(
"lazy_bird.api.routers.health.check_redis_connection",
side_effect=Exception("Redis down"),
):
result = await check_redis_status()
assert result["status"] == "unhealthy"
assert "error" in result
class TestCheckCeleryStatus:
"""Test check_celery_status helper function."""
@pytest.mark.asyncio
async def test_check_celery_with_workers(self):
"""Test Celery health check when workers are active."""
mock_celery = MagicMock()
mock_inspect = MagicMock()
mock_inspect.active.return_value = {
"worker1@host": [{"id": "task-1"}],
"worker2@host": [],
}
mock_celery.control.inspect.return_value = mock_inspect
with patch("lazy_bird.api.routers.health.celery_app", mock_celery, create=True):
with patch.dict("sys.modules", {"lazy_bird.tasks": MagicMock(celery_app=mock_celery)}):
result = await check_celery_status()
assert result["status"] == "healthy"
assert result["workers"] == 2
assert result["active_tasks"] == 1
@pytest.mark.asyncio
async def test_check_celery_no_workers(self):
"""Test Celery health check when no workers are active."""
mock_celery = MagicMock()
mock_inspect = MagicMock()
mock_inspect.active.return_value = None
mock_celery.control.inspect.return_value = mock_inspect
with patch.dict("sys.modules", {"lazy_bird.tasks": MagicMock(celery_app=mock_celery)}):
result = await check_celery_status()
assert result["status"] == "degraded"
assert result["workers"] == 0
@pytest.mark.asyncio
async def test_check_celery_import_error(self):
"""Test Celery health check when module not available."""
with patch.dict("sys.modules", {"lazy_bird.tasks": None}):
with patch(
"builtins.__import__",
side_effect=ImportError("No module named 'lazy_bird.tasks'"),
):
result = await check_celery_status()
assert result["status"] in ("not_configured", "unhealthy")
@pytest.mark.asyncio
async def test_check_celery_exception(self):
"""Test Celery health check when exception occurs."""
mock_celery = MagicMock()
mock_celery.control.inspect.side_effect = Exception("Connection refused")
with patch.dict("sys.modules", {"lazy_bird.tasks": MagicMock(celery_app=mock_celery)}):
result = await check_celery_status()
assert result["status"] == "unhealthy"
assert "error" in result
class TestGetSystemMetrics:
"""Test get_system_metrics function."""
@patch("lazy_bird.api.routers.health.psutil")
@patch("lazy_bird.api.routers.health.shutil")
def test_get_system_metrics_success(self, mock_shutil, mock_psutil):
"""Test successful system metrics collection."""
# Mock memory
mock_memory = MagicMock()
mock_memory.total = 16 * 1024 * 1024 * 1024 # 16 GB
mock_memory.available = 8 * 1024 * 1024 * 1024
mock_memory.used = 8 * 1024 * 1024 * 1024
mock_memory.percent = 50.0
mock_psutil.virtual_memory.return_value = mock_memory
# Mock CPU
mock_psutil.cpu_percent.return_value = 25.0
mock_psutil.cpu_count.return_value = 8
# Mock disk
mock_disk = MagicMock()
mock_disk.total = 500 * 1024 * 1024 * 1024 # 500 GB
mock_disk.used = 200 * 1024 * 1024 * 1024
mock_disk.free = 300 * 1024 * 1024 * 1024
mock_shutil.disk_usage.return_value = mock_disk
result = get_system_metrics()
assert result["cpu"]["usage_percent"] == 25.0
assert result["cpu"]["cores"] == 8
assert result["memory"]["total_mb"] == 16384
assert result["memory"]["percent"] == 50.0
assert result["disk"]["total_gb"] == 500
assert result["disk"]["free_gb"] == 300
@patch("lazy_bird.api.routers.health.psutil")
def test_get_system_metrics_exception(self, mock_psutil):
"""Test system metrics collection when exception occurs."""
mock_psutil.virtual_memory.side_effect = Exception("Permission denied")
result = get_system_metrics()
assert "error" in result
| """Unit tests for lazy_bird.api.routers.health module.
Tests health check, liveness, readiness, and startup probe endpoints.
"""
from unittest.mock import AsyncMock, MagicMock, Mock, patch
import pytest
from lazy_bird.api.routers.health import (
check_celery_status,
check_database,
check_redis_status,
get_system_metrics,
)
class TestCheckDatabase:
"""Test check_database helper function."""
@pytest.mark.asyncio
@patch("lazy_bird.api.routers.health.settings")
@patch("lazy_bird.api.routers.health.check_db_connection")
async def test_check_database_sync_healthy(self, mock_check, mock_settings):
"""Test sync database health check when healthy."""
mock_settings.USE_ASYNC_DB = False
mock_settings.DATABASE_URL = "postgresql://user:pass@localhost:5432/db"
mock_check.return_value = True
result = await check_database()
assert result["status"] == "healthy"
assert result["mode"] == "sync"
@pytest.mark.asyncio
@patch("lazy_bird.api.routers.health.settings")
@patch("lazy_bird.api.routers.health.check_db_connection")
async def test_check_database_sync_unhealthy(self, mock_check, mock_settings):
"""Test sync database health check when unhealthy."""
mock_settings.USE_ASYNC_DB = False
mock_settings.DATABASE_URL = "postgresql | [
"# yusufkaraaslan/lazy-bird:lazy_bird/api/routers/health.py\ncheck_celery_status"
] | yusufkaraaslan/lazy-bird | tests/unit/test_health.py |
"""Unit tests for GitService.
Tests git worktree operations, branch management, and commit/push functionality.
"""
import subprocess
from pathlib import Path
from unittest.mock import MagicMock, Mock, patch
import pytest
from lazy_bird.services.git_service import (
GitCommandError,
GitService,
GitServiceError,
WorktreeExistsError,
)
class TestGitServiceInit:
"""Test GitService initialization."""
def test_init_with_defaults(self, tmp_path):
"""Test GitService initialization with default settings."""
project_path = tmp_path / "project"
project_path.mkdir()
service = GitService(str(project_path))
assert service.project_path == project_path
assert service.worktree_base.exists() # Should be created
assert service.git_user_name == "Lazy-Bird Bot"
assert service.git_user_email == "bot@lazy-bird.dev"
def test_init_with_custom_settings(self, tmp_path):
"""Test GitService with custom settings."""
project_path = tmp_path / "project"
project_path.mkdir()
worktree_base = tmp_path / "worktrees"
service = GitService(
project_path=str(project_path),
worktree_base=str(worktree_base),
git_user_name="Test User",
git_user_email="test@example.com",
)
assert service.project_path == project_path
assert service.worktree_base == worktree_base
assert service.git_user_name == "Test User"
assert service.git_user_email == "test@example.com"
assert worktree_base.exists() # Should be created
class TestRunGit:
"""Test _run_git helper method."""
def test_run_git_success(self, tmp_path):
"""Test successful git command execution."""
service = GitService(str(tmp_path))
with patch("subprocess.run") as mock_run:
mock_run.return_value = subprocess.CompletedProcess(
args=["git", "status"],
returncode=0,
stdout="On branch main",
stderr="",
)
result = service._run_git(["status"])
assert result.returncode == 0
assert "On branch main" in result.stdout
mock_run.assert_called_once()
def test_run_git_failure(self, tmp_path):
"""Test git command failure."""
service = GitService(str(tmp_path))
with patch("subprocess.run") as mock_run:
mock_run.return_value = subprocess.CompletedProcess(
args=["git", "invalid"],
returncode=1,
stdout="",
stderr="fatal: invalid command",
)
with pytest.raises(GitCommandError) as exc_info:
service._run_git(["invalid"])
assert "fatal: invalid command" in str(exc_info.value)
assert exc_info.value.return_code == 1
def test_run_git_no_check(self, tmp_path):
"""Test git command with check=False doesn't raise exception."""
service = GitService(str(tmp_path))
with patch("subprocess.run") as mock_run:
mock_run.return_value = subprocess.CompletedProcess(
args=["git", "invalid"],
returncode=1,
stdout="",
stderr="error",
)
result = service._run_git(["invalid"], check=False)
assert result.returncode == 1
class TestGetBaseBranch:
"""Test _get_base_branch method."""
def test_get_base_branch_main(self, tmp_path):
"""Test detecting 'main' as base branch."""
service = GitService(str(tmp_path))
with patch.object(service, "_run_git") as mock_run:
# First call (check origin/main) returns success
mock_run.return_value = subprocess.CompletedProcess(
args=[], returncode=0, stdout="", stderr=""
)
branch = service._get_base_branch()
assert branch == "main"
def test_get_base_branch_master(self, tmp_path):
"""Test detecting 'master' as base branch."""
service = GitService(str(tmp_path))
with patch.object(service, "_run_git") as mock_run:
# First call (check origin/main) fails, second (origin/master) succeeds
mock_run.side_effect = [
subprocess.CompletedProcess(args=[], returncode=1, stdout="", stderr=""),
subprocess.CompletedProcess(args=[], returncode=0, stdout="", stderr=""),
]
branch = service._get_base_branch()
assert branch == "master"
def test_get_base_branch_not_found(self, tmp_path):
"""Test error when no base branch found."""
service = GitService(str(tmp_path))
with patch.object(service, "_run_git") as mock_run:
# All checks fail
mock_run.return_value = subprocess.CompletedProcess(
args=[], returncode=1, stdout="", stderr=""
)
with pytest.raises(GitServiceError) as exc_info:
service._get_base_branch()
assert "neither main nor master found" in str(exc_info.value).lower()
class TestCreateWorktree:
"""Test create_worktree method."""
def test_create_worktree_success(self, tmp_path):
"""Test successful worktree creation."""
project_path = tmp_path / "project"
project_path.mkdir()
worktree_base = tmp_path / "worktrees"
service = GitService(project_path=str(project_path), worktree_base=str(worktree_base))
with patch.object(service, "_run_git") as mock_run:
with patch.object(service, "_get_base_branch", return_value="main"):
# Mock all git commands to succeed
mock_run.return_value = subprocess.CompletedProcess(
args=[], returncode=0, stdout="", stderr=""
)
worktree_path, branch_name = service.create_worktree(
project_id="test-proj",
task_id=42,
)
assert "test-proj" in str(worktree_path)
assert "42" in str(worktree_path)
assert branch_name == "feature-test-proj-42"
def test_create_worktree_already_exists(self, tmp_path):
"""Test worktree creation when worktree already exists."""
project_path = tmp_path / "project"
project_path.mkdir()
worktree_base = tmp_path / "worktrees"
service = GitService(project_path=str(project_path), worktree_base=str(worktree_base))
existing_worktree = service.worktree_base / "lazy-bird-agent-test-proj-42"
existing_worktree.mkdir(parents=True)
with pytest.raises(WorktreeExistsError):
service.create_worktree(project_id="test-proj", task_id=42)
def test_create_worktree_with_force(self, tmp_path):
"""Test worktree creation with force=True."""
project_path = tmp_path / "project"
project_path.mkdir()
worktree_base = tmp_path / "worktrees"
service = GitService(project_path=str(project_path), worktree_base=str(worktree_base))
existing_worktree = service.worktree_base / "lazy-bird-agent-test-proj-42"
existing_worktree.mkdir(parents=True)
with patch.object(service, "_run_git") as mock_run:
with patch.object(service, "_get_base_branch", return_value="main"):
with patch.object(service, "cleanup_worktree"):
mock_run.return_value = subprocess.CompletedProcess(
args=[], returncode=0, stdout="", stderr=""
)
worktree_path, branch_name = service.create_worktree(
project_id="test-proj",
task_id=42,
force=True,
)
assert branch_name == "feature-test-proj-42"
class TestCommitChanges:
"""Test commit_changes method."""
def test_commit_changes_success(self, tmp_path):
"""Test successful commit."""
service = GitService(str(tmp_path))
worktree_path = tmp_path / "worktree"
worktree_path.mkdir()
with patch("subprocess.run") as mock_run:
# Mock git add
mock_run.side_effect = [
subprocess.CompletedProcess(
args=["git", "add", "-A"],
returncode=0,
stdout="",
stderr="",
),
# Mock git commit
subprocess.CompletedProcess(
args=["git", "commit"],
returncode=0,
stdout="",
stderr="",
),
# Mock git rev-parse (get commit hash)
subprocess.CompletedProcess(
args=["git", "rev-parse", "--short", "HEAD"],
returncode=0,
stdout="abc1234\n",
stderr="",
),
]
commit_hash = service.commit_changes(
worktree_path=worktree_path,
message="Test commit",
)
assert commit_hash == "abc1234"
assert mock_run.call_count == 3
def test_commit_changes_with_project_id(self, tmp_path):
"""Test commit with project_id for logging."""
service = GitService(str(tmp_path))
worktree_path = tmp_path / "worktree"
worktree_path.mkdir()
with patch("subprocess.run") as mock_run:
mock_run.side_effect = [
subprocess.CompletedProcess(args=[], returncode=0, stdout="", stderr=""),
subprocess.CompletedProcess(args=[], returncode=0, stdout="", stderr=""),
subprocess.CompletedProcess(args=[], returncode=0, stdout="abc1234", stderr=""),
]
commit_hash = service.commit_changes(
worktree_path=worktree_path,
message="Test",
project_id="my-project",
)
assert commit_hash == "abc1234"
class TestPushBranch:
"""Test push_branch method."""
def test_push_branch_success(self, tmp_path):
"""Test successful branch push."""
service = GitService(str(tmp_path))
worktree_path = tmp_path / "worktree"
worktree_path.mkdir()
with patch.object(service, "_run_git") as mock_run:
# First call gets branch name, second pushes
mock_run.side_effect = [
subprocess.CompletedProcess(
args=[], returncode=0, stdout="feature-test-42\n", stderr=""
),
subprocess.CompletedProcess(args=[], returncode=0, stdout="", stderr=""),
]
service.push_branch(worktree_path)
assert mock_run.call_count == 2
def test_push_branch_with_force(self, tmp_path):
"""Test force push."""
service = GitService(str(tmp_path))
worktree_path = tmp_path / "worktree"
worktree_path.mkdir()
with patch.object(service, "_run_git") as mock_run:
mock_run.side_effect = [
subprocess.CompletedProcess(
args=[], returncode=0, stdout="feature-test-42\n", stderr=""
),
subprocess.CompletedProcess(args=[], returncode=0, stdout="", stderr=""),
]
service.push_branch(worktree_path, force=True)
# Check that --force-with-lease was used
push_call = mock_run.call_args_list[1]
assert "--force-with-lease" in push_call[0][0]
class TestCleanupWorktree:
"""Test cleanup_worktree method."""
def test_cleanup_worktree_success(self, tmp_path):
"""Test successful worktree cleanup."""
service = GitService(str(tmp_path))
worktree_path = tmp_path / "worktree"
worktree_path.mkdir()
with patch.object(service, "_run_git") as mock_run:
mock_run.return_value = subprocess.CompletedProcess(
args=[], returncode=0, stdout="", stderr=""
)
service.cleanup_worktree(
worktree_path=worktree_path,
branch_name="feature-test-42",
)
# Should call: worktree remove, worktree prune, branch -D
assert mock_run.call_count == 3
def test_cleanup_worktree_fallback_to_rm(self, tmp_path):
"""Test cleanup falls back to rm -rf if git worktree remove fails."""
service = GitService(str(tmp_path))
worktree_path = tmp_path / "worktree"
worktree_path.mkdir()
with patch.object(service, "_run_git") as mock_run:
# First call (worktree remove) raises GitCommandError
def side_effect_fn(args, **kwargs):
if "worktree" in args and "remove" in args:
raise GitCommandError(
command="git worktree remove", return_code=1, stderr="error"
)
return subprocess.CompletedProcess(args=[], returncode=0, stdout="", stderr="")
mock_run.side_effect = side_effect_fn
with patch("lazy_bird.services.git_service.shutil.rmtree") as mock_rmtree:
service.cleanup_worktree(
worktree_path=worktree_path,
branch_name="feature-test-42",
)
# Should have called shutil.rmtree as fallback
mock_rmtree.assert_called_once()
class TestGetDiffStats:
"""Test get_diff_stats method."""
def test_get_diff_stats_with_changes(self, tmp_path):
"""Test getting diff statistics."""
service = GitService(str(tmp_path))
worktree_path = tmp_path / "worktree"
worktree_path.mkdir()
with patch.object(service, "_run_git") as mock_run:
mock_run.return_value = subprocess.CompletedProcess(
args=[],
returncode=0,
stdout="file1.py | 10 ++++++++++\n"
"file2.py | 5 ++---\n"
"2 files changed, 12 insertions(+), 3 deletions(-)\n",
stderr="",
)
stats = service.get_diff_stats(worktree_path)
assert stats["files_changed"] == 2
assert stats["insertions"] == 12
assert stats["deletions"] == 3
def test_get_diff_stats_no_changes(self, tmp_path):
"""Test getting diff stats when no changes."""
service = GitService(str(tmp_path))
worktree_path = tmp_path / "worktree"
worktree_path.mkdir()
with patch.object(service, "_run_git") as mock_run:
mock_run.return_value = subprocess.CompletedProcess(
args=[],
returncode=0,
stdout="",
stderr="",
)
stats = service.get_diff_stats(worktree_path)
assert stats["files_changed"] == 0
assert stats["insertions"] == 0
assert stats["deletions"] == 0
class TestGitServiceErrors:
"""Test GitService exception classes."""
def test_git_command_error(self):
"""Test GitCommandError exception."""
error = GitCommandError(
command="git status",
return_code=1,
stderr="fatal: not a git repository",
)
assert error.command == "git status"
assert error.return_code == 1
assert "fatal: not a git repository" in error.stderr
assert "git status" in str(error)
def test_worktree_exists_error(self):
"""Test WorktreeExistsError exception."""
error = WorktreeExistsError("Worktree exists at /tmp/worktree")
assert "Worktree exists" in str(error)
assert isinstance(error, GitServiceError)
def test_git_service_error(self):
"""Test base GitServiceError."""
error = GitServiceError("Something went wrong")
assert "Something went wrong" in str(error)
| """Unit tests for GitService.
Tests git worktree operations, branch management, and commit/push functionality.
"""
import subprocess
from pathlib import Path
from unittest.mock import MagicMock, Mock, patch
import pytest
from lazy_bird.services.git_service import (
GitCommandError,
GitService,
GitServiceError,
WorktreeExistsError,
)
class TestGitServiceInit:
"""Test GitService initialization."""
def test_init_with_defaults(self, tmp_path):
"""Test GitService initialization with default settings."""
project_path = tmp_path / "project"
project_path.mkdir()
service = GitService(str(project_path))
assert service.project_path == project_path
assert service.worktree_base.exists() # Should be created
assert service.git_user_name == "Lazy-Bird Bot"
assert service.git_user_email == "bot@lazy-bird.dev"
def test_init_with_custom_settings(self, tmp_path):
"""Test GitService with custom settings."""
project_path = tmp_path / "project"
project_path.mkdir()
worktree_base = tmp_path / "worktrees"
service = GitService(
project_path=str(project_path),
worktree_base=str(worktree_base),
git_user_name="Test User",
git_user_email="test@example.com",
| [
"# yusufkaraaslan/lazy-bird:lazy_bird/services/git_service.py\nGitCommandError"
] | yusufkaraaslan/lazy-bird | tests/unit/test_git_service.py |
"""Unit tests for custom exception classes.
Tests RFC 7807 Problem Details error responses.
"""
import pytest
from fastapi import status
from lazy_bird.api.exceptions import (
AuthenticationError,
AuthorizationError,
ResourceConflictError,
ResourceNotFoundError,
ValidationError,
)
class TestResourceNotFoundError:
"""Test ResourceNotFoundError (404) exception."""
def test_basic_initialization(self):
"""Test basic exception initialization."""
error = ResourceNotFoundError(
resource_type="Project",
resource_id="123",
)
assert error.status_code == status.HTTP_404_NOT_FOUND
assert error.resource_type == "Project"
assert error.resource_id == "123"
assert "Project" in error.detail
assert "123" in error.detail
def test_custom_detail_message(self):
"""Test exception with custom detail message."""
error = ResourceNotFoundError(
resource_type="Project",
resource_id="abc-123",
detail="Custom error message",
)
assert error.detail == "Custom error message"
def test_headers_property(self):
"""Test that headers property returns correct format."""
error = ResourceNotFoundError(
resource_type="ApiKey",
resource_id="key-456",
)
headers = error.headers
assert headers["Content-Type"] == "application/problem+json"
def test_to_problem_details(self):
"""Test conversion to RFC 7807 Problem Details format."""
error = ResourceNotFoundError(
resource_type="TaskRun",
resource_id="task-789",
)
problem_details = error.to_problem_details()
assert problem_details["type"] == "about:blank"
assert problem_details["title"] == "Not Found"
assert problem_details["status"] == 404
assert problem_details["detail"] == error.detail
assert "resource_type" in problem_details
assert problem_details["resource_type"] == "TaskRun"
assert "resource_id" in problem_details
assert problem_details["resource_id"] == "task-789"
class TestResourceConflictError:
"""Test ResourceConflictError (409) exception."""
def test_basic_initialization(self):
"""Test basic exception initialization."""
error = ResourceConflictError(
detail="Resource already exists",
conflict_field="name",
)
assert error.status_code == status.HTTP_409_CONFLICT
assert error.detail == "Resource already exists"
assert error.conflict_field == "name"
def test_with_conflict_value(self):
"""Test exception with conflict_value."""
error = ResourceConflictError(
detail="Name already taken",
conflict_field="name",
conflict_value="my-project",
)
assert error.conflict_value == "my-project"
def test_to_problem_details(self):
"""Test conversion to RFC 7807 Problem Details format."""
error = ResourceConflictError(
detail="Duplicate slug",
conflict_field="slug",
conflict_value="duplicate-slug",
)
problem_details = error.to_problem_details()
assert problem_details["type"] == "about:blank"
assert problem_details["title"] == "Conflict"
assert problem_details["status"] == 409
assert problem_details["detail"] == "Duplicate slug"
assert problem_details["conflict_field"] == "slug"
assert problem_details["conflict_value"] == "duplicate-slug"
def test_without_conflict_value(self):
"""Test that conflict_value is optional."""
error = ResourceConflictError(
detail="Invalid state transition",
conflict_field="status",
)
problem_details = error.to_problem_details()
assert "conflict_value" not in problem_details
class TestAuthenticationError:
"""Test AuthenticationError (401) exception."""
def test_basic_initialization(self):
"""Test basic exception initialization."""
error = AuthenticationError(detail="Invalid API key")
assert error.status_code == status.HTTP_401_UNAUTHORIZED
assert error.detail == "Invalid API key"
def test_default_detail_message(self):
"""Test default detail message."""
error = AuthenticationError()
assert "authentication" in error.detail.lower()
def test_www_authenticate_header(self):
"""Test WWW-Authenticate header."""
error = AuthenticationError()
headers = error.headers
assert "WWW-Authenticate" in headers
assert headers["WWW-Authenticate"] == 'Bearer realm="api"'
def test_to_problem_details(self):
"""Test conversion to RFC 7807 Problem Details format."""
error = AuthenticationError(detail="Token expired")
problem_details = error.to_problem_details()
assert problem_details["type"] == "about:blank"
assert problem_details["title"] == "Unauthorized"
assert problem_details["status"] == 401
assert problem_details["detail"] == "Token expired"
class TestAuthorizationError:
"""Test AuthorizationError (403) exception."""
def test_basic_initialization(self):
"""Test basic exception initialization."""
error = AuthorizationError(
detail="Insufficient permissions",
required_scope="admin",
)
assert error.status_code == status.HTTP_403_FORBIDDEN
assert error.detail == "Insufficient permissions"
assert error.required_scope == "admin"
def test_default_detail_message(self):
"""Test default detail message."""
error = AuthorizationError()
assert "permission" in error.detail.lower() or "forbidden" in error.detail.lower()
def test_to_problem_details(self):
"""Test conversion to RFC 7807 Problem Details format."""
error = AuthorizationError(
detail="Admin access required",
required_scope="admin",
user_scopes=["read", "write"],
)
problem_details = error.to_problem_details()
assert problem_details["type"] == "about:blank"
assert problem_details["title"] == "Forbidden"
assert problem_details["status"] == 403
assert problem_details["detail"] == "Admin access required"
assert problem_details["required_scope"] == "admin"
assert problem_details["user_scopes"] == ["read", "write"]
class TestValidationError:
"""Test ValidationError (422) exception."""
def test_basic_initialization(self):
"""Test basic exception initialization."""
error = ValidationError(
detail="Invalid input data",
field="email",
reason="Invalid email format",
)
assert error.status_code == status.HTTP_422_UNPROCESSABLE_ENTITY
assert error.detail == "Invalid input data"
assert error.field == "email"
assert error.reason == "Invalid email format"
def test_to_problem_details(self):
"""Test conversion to RFC 7807 Problem Details format."""
error = ValidationError(
detail="Validation failed",
field="max_tokens",
reason="Must be between 1 and 100000",
)
problem_details = error.to_problem_details()
assert problem_details["type"] == "about:blank"
assert problem_details["title"] == "Unprocessable Entity"
assert problem_details["status"] == 422
assert problem_details["detail"] == "Validation failed"
assert problem_details["field"] == "max_tokens"
assert problem_details["reason"] == "Must be between 1 and 100000"
class TestExceptionHeaders:
"""Test exception headers and content types."""
def test_all_exceptions_have_problem_json_header(self):
"""Test that all exceptions return application/problem+json content type."""
exceptions = [
ResourceNotFoundError("Project", "123"),
ResourceConflictError("Conflict", "name"),
AuthenticationError(),
AuthorizationError(),
ValidationError("Error", "field"),
]
for exception in exceptions:
assert exception.headers["Content-Type"] == "application/problem+json"
def test_authentication_error_has_www_authenticate(self):
"""Test that authentication error includes WWW-Authenticate header."""
error = AuthenticationError()
assert "WWW-Authenticate" in error.headers
assert error.headers["WWW-Authenticate"] == 'Bearer realm="api"'
class TestExceptionInheritance:
"""Test exception class inheritance."""
def test_all_inherit_from_base_exception(self):
"""Test that all custom exceptions inherit from Exception."""
from lazy_bird.api.exceptions import LazyBirdException
exceptions = [
ResourceNotFoundError("Project", "123"),
ResourceConflictError("Conflict", "name"),
AuthenticationError(),
AuthorizationError(),
ValidationError("Error", "field"),
]
for exception in exceptions:
assert isinstance(exception, LazyBirdException)
assert isinstance(exception, Exception)
def test_exceptions_can_be_raised_and_caught(self):
"""Test that exceptions can be raised and caught."""
with pytest.raises(ResourceNotFoundError) as exc_info:
raise ResourceNotFoundError("Project", "123")
assert exc_info.value.status_code == 404
assert "Project" in exc_info.value.detail
with pytest.raises(ResourceConflictError) as exc_info:
raise ResourceConflictError("Conflict", "name")
assert exc_info.value.status_code == 409
| """Unit tests for custom exception classes.
Tests RFC 7807 Problem Details error responses.
"""
import pytest
from fastapi import status
from lazy_bird.api.exceptions import (
AuthenticationError,
AuthorizationError,
ResourceConflictError,
ResourceNotFoundError,
ValidationError,
)
class TestResourceNotFoundError:
"""Test ResourceNotFoundError (404) exception."""
def test_basic_initialization(self):
"""Test basic exception initialization."""
error = ResourceNotFoundError(
resource_type="Project",
resource_id="123",
)
assert error.status_code == status.HTTP_404_NOT_FOUND
assert error.resource_type == "Project"
assert error.resource_id == "123"
assert "Project" in error.detail
assert "123" in error.detail
def test_custom_detail_message(self):
"""Test exception with custom detail message."""
error = ResourceNotFoundError(
resource_type="Project",
resource_id="abc-123",
detail="Custom error message",
)
assert error.detail == "Custom error message"
def test_headers_property(self):
"""Test that headers property returns correct format."""
error = ResourceNotFoundError(
resource_type="ApiKey",
resource_id="key-456",
| [
"# yusufkaraaslan/lazy-bird:lazy_bird/api/exceptions.py\nAuthenticationError"
] | yusufkaraaslan/lazy-bird | tests/unit/test_exceptions.py |
"""Unit tests for API dependencies (authentication).
Tests RequireRead, RequireWrite, RequireAdmin authentication dependencies.
"""
from datetime import datetime, timezone
from uuid import uuid4
import pytest
from fastapi import HTTPException
from lazy_bird.api.dependencies import RequireAdmin, RequireRead, RequireWrite
from lazy_bird.models.api_key import ApiKey
class TestRequireRead:
"""Test RequireRead dependency."""
@pytest.mark.asyncio
async def test_valid_read_scope(self):
"""Test authentication with valid read scope."""
# Create mock API key with read scope
api_key = ApiKey(
id=uuid4(),
key_hash="test-hash",
key_prefix="lb_test1",
name="Test Key",
scopes=["read"],
is_active=True,
created_at=datetime.now(timezone.utc),
)
# Call dependency directly with ApiKey object
result = await RequireRead(api_key=api_key)
assert result == api_key
assert result.scopes == ["read"]
@pytest.mark.asyncio
async def test_valid_write_scope_allowed(self):
"""Test that write scope is allowed for read operations."""
api_key = ApiKey(
id=uuid4(),
key_hash="test-hash",
key_prefix="lb_test2",
name="Test Key",
scopes=["write"], # write includes read
is_active=True,
created_at=datetime.now(timezone.utc),
)
result = await RequireRead(api_key=api_key)
assert result == api_key
@pytest.mark.asyncio
async def test_valid_admin_scope_allowed(self):
"""Test that admin scope is allowed for read operations."""
api_key = ApiKey(
id=uuid4(),
key_hash="test-hash",
key_prefix="lb_test3",
name="Test Key",
scopes=["admin"], # admin includes all
is_active=True,
created_at=datetime.now(timezone.utc),
)
result = await RequireRead(api_key=api_key)
assert result == api_key
@pytest.mark.asyncio
async def test_insufficient_scope(self):
"""Test authorization fails with insufficient scope."""
api_key = ApiKey(
id=uuid4(),
key_hash="test-hash",
key_prefix="lb_test5",
name="Test Key",
scopes=[], # No scopes
is_active=True,
created_at=datetime.now(timezone.utc),
)
with pytest.raises(HTTPException) as exc_info:
await RequireRead(api_key=api_key)
assert exc_info.value.status_code == 403
assert (
"permission" in exc_info.value.detail.lower()
or "scope" in exc_info.value.detail.lower()
)
class TestRequireWrite:
"""Test RequireWrite dependency."""
@pytest.mark.asyncio
async def test_valid_write_scope(self):
"""Test authentication with valid write scope."""
api_key = ApiKey(
id=uuid4(),
key_hash="test-hash",
key_prefix="lb_write",
name="Write Key",
scopes=["write"],
is_active=True,
created_at=datetime.now(timezone.utc),
)
result = await RequireWrite(api_key=api_key)
assert result == api_key
@pytest.mark.asyncio
async def test_read_scope_insufficient(self):
"""Test that read-only scope is insufficient for write operations."""
api_key = ApiKey(
id=uuid4(),
key_hash="test-hash",
key_prefix="lb_read",
name="Read Key",
scopes=["read"], # Only read, not write
is_active=True,
created_at=datetime.now(timezone.utc),
)
with pytest.raises(HTTPException) as exc_info:
await RequireWrite(api_key=api_key)
assert exc_info.value.status_code == 403
assert "permission" in exc_info.value.detail.lower()
@pytest.mark.asyncio
async def test_admin_scope_allowed(self):
"""Test that admin scope is allowed for write operations."""
api_key = ApiKey(
id=uuid4(),
key_hash="test-hash",
key_prefix="lb_admin",
name="Admin Key",
scopes=["admin"], # admin includes write
is_active=True,
created_at=datetime.now(timezone.utc),
)
result = await RequireWrite(api_key=api_key)
assert result == api_key
class TestRequireAdmin:
"""Test RequireAdmin dependency."""
@pytest.mark.asyncio
async def test_valid_admin_scope(self):
"""Test authentication with valid admin scope."""
api_key = ApiKey(
id=uuid4(),
key_hash="test-hash",
key_prefix="lb_admin",
name="Admin Key",
scopes=["admin"],
is_active=True,
created_at=datetime.now(timezone.utc),
)
result = await RequireAdmin(api_key=api_key)
assert result == api_key
@pytest.mark.asyncio
async def test_read_scope_insufficient(self):
"""Test that read scope is insufficient for admin operations."""
api_key = ApiKey(
id=uuid4(),
key_hash="test-hash",
key_prefix="lb_read",
name="Read Key",
scopes=["read"],
is_active=True,
created_at=datetime.now(timezone.utc),
)
with pytest.raises(HTTPException) as exc_info:
await RequireAdmin(api_key=api_key)
assert exc_info.value.status_code == 403
assert "permission" in exc_info.value.detail.lower()
@pytest.mark.asyncio
async def test_write_scope_insufficient(self):
"""Test that write scope is insufficient for admin operations."""
api_key = ApiKey(
id=uuid4(),
key_hash="test-hash",
key_prefix="lb_write",
name="Write Key",
scopes=["write"], # No admin
is_active=True,
created_at=datetime.now(timezone.utc),
)
with pytest.raises(HTTPException) as exc_info:
await RequireAdmin(api_key=api_key)
assert exc_info.value.status_code == 403
assert "permission" in exc_info.value.detail.lower()
class TestScopeHierarchy:
"""Test scope hierarchy (admin > write > read)."""
@pytest.mark.asyncio
async def test_admin_can_do_everything(self):
"""Test that admin scope can perform read, write, and admin operations."""
api_key = ApiKey(
id=uuid4(),
key_hash="test-hash",
key_prefix="lb_admin",
name="Admin Key",
scopes=["admin"],
is_active=True,
created_at=datetime.now(timezone.utc),
)
# Admin can do read operations
result = await RequireRead(api_key=api_key)
assert result == api_key
# Admin can do write operations
result = await RequireWrite(api_key=api_key)
assert result == api_key
# Admin can do admin operations
result = await RequireAdmin(api_key=api_key)
assert result == api_key
@pytest.mark.asyncio
async def test_write_can_do_read(self):
"""Test that write scope can perform read operations."""
api_key = ApiKey(
id=uuid4(),
key_hash="test-hash",
key_prefix="lb_write",
name="Write Key",
scopes=["write"],
is_active=True,
created_at=datetime.now(timezone.utc),
)
# Write can do read operations
result = await RequireRead(api_key=api_key)
assert result == api_key
# Write can do write operations
result = await RequireWrite(api_key=api_key)
assert result == api_key
@pytest.mark.asyncio
async def test_read_cannot_do_write_or_admin(self):
"""Test that read scope cannot perform write or admin operations."""
api_key = ApiKey(
id=uuid4(),
key_hash="test-hash",
key_prefix="lb_read",
name="Read Key",
scopes=["read"],
is_active=True,
created_at=datetime.now(timezone.utc),
)
# Read can do read operations
result = await RequireRead(api_key=api_key)
assert result == api_key
# Read CANNOT do write operations
with pytest.raises(HTTPException) as exc_info:
await RequireWrite(api_key=api_key)
assert exc_info.value.status_code == 403
# Read CANNOT do admin operations
with pytest.raises(HTTPException) as exc_info:
await RequireAdmin(api_key=api_key)
assert exc_info.value.status_code == 403
| """Unit tests for API dependencies (authentication).
Tests RequireRead, RequireWrite, RequireAdmin authentication dependencies.
"""
from datetime import datetime, timezone
from uuid import uuid4
import pytest
from fastapi import HTTPException
from lazy_bird.api.dependencies import RequireAdmin, RequireRead, RequireWrite
from lazy_bird.models.api_key import ApiKey
class TestRequireRead:
"""Test RequireRead dependency."""
@pytest.mark.asyncio
async def test_valid_read_scope(self):
"""Test authentication with valid read scope."""
# Create mock API key with read scope
api_key = ApiKey(
id=uuid4(),
key_hash="test-hash",
key_prefix="lb_test1",
name="Test Key",
scopes=["read"],
is_active=True,
created_at=datetime.now(timezone.utc),
)
# Call dependency directly with ApiKey object
result = await RequireRead(api_key=api_key)
assert result == api_key
assert result.scopes == ["read"]
@pytest.mark.asyncio
async def test_valid_write_scope_allowed(self):
"""Test that write scope is allowed for read operations."""
api_key = ApiKey(
id=uuid4(),
| [
"# fastapi/fastapi:fastapi/exceptions.py\nHTTPException",
"# yusufkaraaslan/lazy-bird:lazy_bird/models/api_key.py\nApiKey"
] | yusufkaraaslan/lazy-bird | tests/unit/test_dependencies.py |
"""Unit tests for lazy_bird.core.database module.
Tests database initialization, session management, and health checks.
"""
from unittest.mock import MagicMock, patch
import pytest
from lazy_bird.core.database import Base
class TestBase:
"""Test declarative base."""
def test_base_exists(self):
"""Test Base declarative base exists."""
assert Base is not None
assert hasattr(Base, "metadata")
def test_base_has_tables_after_model_import(self):
"""Test Base has tables after importing models."""
import lazy_bird.models # noqa: F401
# Should have at least some tables registered
assert len(Base.metadata.tables) > 0
class TestGetDb:
"""Test get_db synchronous dependency."""
def test_get_db_yields_session(self):
"""Test get_db yields a session and closes it."""
from lazy_bird.core.database import get_db
mock_session = MagicMock()
with patch("lazy_bird.core.database.SessionLocal", return_value=mock_session):
gen = get_db()
session = next(gen)
assert session is mock_session
# Complete the generator
try:
next(gen)
except StopIteration:
pass
mock_session.commit.assert_called_once()
mock_session.close.assert_called_once()
def test_get_db_rolls_back_on_exception(self):
"""Test get_db rolls back on exception."""
from lazy_bird.core.database import get_db
mock_session = MagicMock()
with patch("lazy_bird.core.database.SessionLocal", return_value=mock_session):
gen = get_db()
session = next(gen)
# Simulate exception during request handling
with pytest.raises(ValueError):
gen.throw(ValueError("Test error"))
mock_session.rollback.assert_called_once()
mock_session.close.assert_called_once()
class TestGetAsyncDb:
"""Test get_async_db asynchronous dependency."""
@pytest.mark.asyncio
async def test_get_async_db_yields_session(self):
"""Test get_async_db yields a session."""
from unittest.mock import AsyncMock
from lazy_bird.core.database import get_async_db
mock_session = MagicMock()
mock_session.commit = AsyncMock(return_value=None)
mock_session.rollback = AsyncMock(return_value=None)
mock_session.__aenter__ = AsyncMock(return_value=mock_session)
mock_session.__aexit__ = AsyncMock(return_value=None)
with patch("lazy_bird.core.database.AsyncSessionLocal", return_value=mock_session):
async for session in get_async_db():
assert session is mock_session
class TestCheckDbConnection:
"""Test check_db_connection function."""
def test_check_db_connection_success(self):
"""Test successful database connection check."""
from lazy_bird.core.database import check_db_connection
mock_conn = MagicMock()
mock_conn.__enter__ = MagicMock(return_value=mock_conn)
mock_conn.__exit__ = MagicMock(return_value=False)
with patch("lazy_bird.core.database.engine") as mock_engine:
mock_engine.connect.return_value = mock_conn
result = check_db_connection()
assert result is True
def test_check_db_connection_failure(self):
"""Test failed database connection check."""
from lazy_bird.core.database import check_db_connection
with patch("lazy_bird.core.database.engine") as mock_engine:
mock_engine.connect.side_effect = Exception("Connection refused")
result = check_db_connection()
assert result is False
class TestInitDb:
"""Test init_db and drop_db functions."""
def test_init_db(self):
"""Test init_db calls create_all."""
from lazy_bird.core.database import init_db
with patch.object(Base.metadata, "create_all") as mock_create:
init_db()
mock_create.assert_called_once()
def test_drop_db(self):
"""Test drop_db calls drop_all."""
from lazy_bird.core.database import drop_db
with patch.object(Base.metadata, "drop_all") as mock_drop:
drop_db()
mock_drop.assert_called_once()
class TestConnectionPoolEvents:
"""Test connection pool event listeners."""
@patch("lazy_bird.core.database.settings")
def test_receive_connect_with_echo(self, mock_settings, capsys):
"""Test connect event listener with echo enabled."""
mock_settings.DB_ECHO = True
from lazy_bird.core.database import receive_connect
receive_connect("mock_dbapi_conn", "mock_record")
captured = capsys.readouterr()
assert "Database connection opened" in captured.out
@patch("lazy_bird.core.database.settings")
def test_receive_connect_without_echo(self, mock_settings, capsys):
"""Test connect event listener with echo disabled."""
mock_settings.DB_ECHO = False
from lazy_bird.core.database import receive_connect
receive_connect("mock_dbapi_conn", "mock_record")
captured = capsys.readouterr()
assert captured.out == ""
@patch("lazy_bird.core.database.settings")
def test_receive_checkout_with_echo(self, mock_settings, capsys):
"""Test checkout event listener with echo enabled."""
mock_settings.DB_ECHO = True
from lazy_bird.core.database import receive_checkout
receive_checkout("mock_dbapi_conn", "mock_record", "mock_proxy")
captured = capsys.readouterr()
assert "Database connection checked out" in captured.out
@patch("lazy_bird.core.database.settings")
def test_receive_checkin_with_echo(self, mock_settings, capsys):
"""Test checkin event listener with echo enabled."""
mock_settings.DB_ECHO = True
from lazy_bird.core.database import receive_checkin
receive_checkin("mock_dbapi_conn", "mock_record")
captured = capsys.readouterr()
assert "Database connection checked in" in captured.out
| """Unit tests for lazy_bird.core.database module.
Tests database initialization, session management, and health checks.
"""
from unittest.mock import MagicMock, patch
import pytest
from lazy_bird.core.database import Base
class TestBase:
"""Test declarative base."""
def test_base_exists(self):
"""Test Base declarative base exists."""
assert Base is not None
assert hasattr(Base, "metadata")
def test_base_has_tables_after_model_import(self):
"""Test Base has tables after importing models."""
import lazy_bird.models # noqa: F401
# Should have at least some tables registered
assert len(Base.metadata.tables) > 0
class TestGetDb:
"""Test get_db synchronous dependency."""
def test_get_db_yields_session(self):
"""Test get_db yields a session and closes it."""
from lazy_bird.core.database import get_db
mock_session = MagicMock()
with patch("lazy_bird.core.database.SessionLocal", return_value=mock_session):
gen = get_db()
session = next(gen)
assert session is mock_session
# Complete the generator
try:
next(gen)
except StopIteration:
pass
| [] | yusufkaraaslan/lazy-bird | tests/unit/test_database.py |
"""Additional unit tests for ClaudeService execute_claude edge cases.
Covers lines 220-232 (generic exception handling in execute_claude).
"""
import subprocess
from pathlib import Path
from unittest.mock import MagicMock, Mock, patch
import pytest
from lazy_bird.services.claude_service import (
ClaudeService,
ClaudeServiceError,
)
class TestExecuteClaudeGenericException:
"""Test execute_claude generic exception path (lines 220-232)."""
def test_execute_claude_generic_exception(self, tmp_path):
"""Test that generic exceptions are wrapped in ClaudeServiceError."""
service = ClaudeService(
api_key="test-key",
model="claude-sonnet-4",
max_tokens=4096,
temperature=0.7,
)
with patch("subprocess.run") as mock_run:
mock_run.side_effect = OSError("No such file or directory: 'claude'")
with pytest.raises(ClaudeServiceError) as exc_info:
service.execute_claude(
prompt="Add health system",
working_directory=tmp_path,
project_id="test-proj",
task_id=42,
)
assert "Unexpected error" in str(exc_info.value)
assert "No such file or directory" in str(exc_info.value)
def test_execute_claude_permission_error(self, tmp_path):
"""Test that PermissionError is wrapped in ClaudeServiceError."""
service = ClaudeService(
api_key="test-key",
model="claude-sonnet-4",
max_tokens=4096,
temperature=0.7,
)
with patch("subprocess.run") as mock_run:
mock_run.side_effect = PermissionError("Permission denied")
with pytest.raises(ClaudeServiceError) as exc_info:
service.execute_claude(
prompt="test",
working_directory=tmp_path,
)
assert "Unexpected error" in str(exc_info.value)
class TestGetSettings:
"""Test _get_settings lazy import."""
def test_get_settings_returns_settings(self):
"""Test that _get_settings returns the settings singleton."""
from lazy_bird.services.claude_service import _get_settings
settings = _get_settings()
# Should return the settings object
assert settings is not None
assert hasattr(settings, "CLAUDE_API_KEY")
| """Additional unit tests for ClaudeService execute_claude edge cases.
Covers lines 220-232 (generic exception handling in execute_claude).
"""
import subprocess
from pathlib import Path
from unittest.mock import MagicMock, Mock, patch
import pytest
from lazy_bird.services.claude_service import (
ClaudeService,
ClaudeServiceError,
)
class TestExecuteClaudeGenericException:
"""Test execute_claude generic exception path (lines 220-232)."""
def test_execute_claude_generic_exception(self, tmp_path):
"""Test that generic exceptions are wrapped in ClaudeServiceError."""
service = ClaudeService(
api_key="test-key",
model="claude-sonnet-4",
max_tokens=4096,
temperature=0.7,
)
with patch("subprocess.run") as mock_run:
mock_run.side_effect = OSError("No such file or directory: 'claude'")
with pytest.raises(ClaudeServiceError) as exc_info:
service.execute_claude(
prompt="Add health system",
working_directory=tmp_path,
project_id="test-proj",
task_id=42,
| [
"# yusufkaraaslan/lazy-bird:lazy_bird/services/claude_service.py\nClaudeService"
] | yusufkaraaslan/lazy-bird | tests/unit/test_claude_service_execution.py |
"""Unit tests for ClaudeService.
Tests Claude Code CLI execution, output parsing, token tracking, and error handling.
"""
import json
import subprocess
from pathlib import Path
from unittest.mock import MagicMock, Mock, patch, mock_open
import pytest
from lazy_bird.services.claude_service import (
ClaudeExecutionError,
ClaudeService,
ClaudeServiceError,
ClaudeTimeoutError,
)
class TestClaudeServiceInit:
"""Test ClaudeService initialization."""
def test_init_with_defaults(self):
"""Test ClaudeService initialization with default settings."""
with patch("lazy_bird.services.claude_service._get_settings") as mock_get_settings:
mock_settings = Mock()
mock_settings.CLAUDE_API_KEY = "test-key"
mock_settings.CLAUDE_MODEL = "claude-sonnet-4"
mock_settings.CLAUDE_MAX_TOKENS = 4096
mock_settings.CLAUDE_TEMPERATURE = 0.7
mock_get_settings.return_value = mock_settings
service = ClaudeService()
assert service.api_key == "test-key"
assert service.model == "claude-sonnet-4"
assert service.max_tokens == 4096
assert service.temperature == 0.7
assert service.timeout == 600
def test_init_with_custom_settings(self):
"""Test ClaudeService with custom settings."""
service = ClaudeService(
api_key="custom-key",
model="claude-opus-4",
max_tokens=8192,
temperature=0.5,
timeout=1200,
)
assert service.api_key == "custom-key"
assert service.model == "claude-opus-4"
assert service.max_tokens == 8192
assert service.temperature == 0.5
assert service.timeout == 1200
def test_init_without_api_key(self):
"""Test ClaudeService fails without API key."""
with patch("lazy_bird.services.claude_service._get_settings") as mock_get_settings:
mock_settings = Mock()
mock_settings.CLAUDE_API_KEY = None
mock_settings.CLAUDE_MODEL = "claude-sonnet-4"
mock_settings.CLAUDE_MAX_TOKENS = 4096
mock_settings.CLAUDE_TEMPERATURE = 0.7
mock_get_settings.return_value = mock_settings
with pytest.raises(ClaudeServiceError) as exc_info:
ClaudeService()
assert "API key not configured" in str(exc_info.value)
class TestBuildPrompt:
"""Test _build_prompt method."""
def test_build_prompt_without_error_context(self):
"""Test building prompt without error context."""
service = ClaudeService(
api_key="test-key",
model="claude-sonnet-4",
max_tokens=4096,
temperature=0.7,
)
prompt = service._build_prompt("Add health system", None)
assert prompt == "Add health system"
def test_build_prompt_with_error_context(self):
"""Test building prompt with error context for retry."""
service = ClaudeService(
api_key="test-key", model="claude-sonnet-4", max_tokens=4096, temperature=0.7
)
error_context = "TypeError: 'NoneType' object is not callable"
prompt = service._build_prompt("Add health system", error_context)
assert "Add health system" in prompt
assert "Previous Attempt Error" in prompt
assert error_context in prompt
assert "Please fix the issues above" in prompt
class TestBuildCommand:
"""Test _build_command method."""
def test_build_command(self, tmp_path):
"""Test building Claude CLI command."""
service = ClaudeService(
api_key="test-key",
model="claude-sonnet-4",
max_tokens=4096,
temperature=0.7,
)
command = service._build_command("Test prompt", tmp_path)
assert command[0] == "claude"
assert "-p" in command
assert "Test prompt" in command
assert "--model" in command
assert "claude-sonnet-4" in command
assert "--max-tokens" in command
assert "4096" in command
assert "--temperature" in command
assert "0.7" in command
assert "--output-format" in command
assert "json" in command
class TestConstructTaskPrompt:
"""Test construct_task_prompt method."""
def test_construct_task_prompt(self, tmp_path):
"""Test constructing detailed task prompt."""
service = ClaudeService(
api_key="test-key", model="claude-sonnet-4", max_tokens=4096, temperature=0.7
)
prompt = service.construct_task_prompt(
project_name="My Game",
project_type="godot",
project_id="my-game",
task_title="Add player health",
task_body="Implement health system with 100 max HP",
working_directory=tmp_path,
)
assert "My Game" in prompt
assert "godot" in prompt
assert "my-game" in prompt
assert "Add player health" in prompt
assert "Implement health system with 100 max HP" in prompt
assert str(tmp_path) in prompt
assert "DO NOT" in prompt
assert "git commit" in prompt
class TestExtractTokens:
"""Test _extract_tokens_from_text method."""
def test_extract_tokens_pattern_1(self):
"""Test extracting tokens from 'total tokens: 1234' pattern."""
service = ClaudeService(
api_key="test-key", model="claude-sonnet-4", max_tokens=4096, temperature=0.7
)
text = "Execution completed. Total tokens: 1234"
tokens = service._extract_tokens_from_text(text)
assert tokens == 1234
def test_extract_tokens_pattern_2(self):
"""Test extracting tokens from 'tokens used: 5678' pattern."""
service = ClaudeService(
api_key="test-key", model="claude-sonnet-4", max_tokens=4096, temperature=0.7
)
text = "Usage: Tokens used: 5678"
tokens = service._extract_tokens_from_text(text)
assert tokens == 5678
def test_extract_tokens_pattern_3(self):
"""Test extracting tokens from '9012 tokens' pattern."""
service = ClaudeService(
api_key="test-key", model="claude-sonnet-4", max_tokens=4096, temperature=0.7
)
text = "Processed 9012 tokens successfully"
tokens = service._extract_tokens_from_text(text)
assert tokens == 9012
def test_extract_tokens_not_found(self):
"""Test extracting tokens when pattern not found."""
service = ClaudeService(
api_key="test-key", model="claude-sonnet-4", max_tokens=4096, temperature=0.7
)
text = "No token information here"
tokens = service._extract_tokens_from_text(text)
assert tokens == 0
class TestCalculateCost:
"""Test _calculate_cost method."""
def test_calculate_cost_zero_tokens(self):
"""Test cost calculation with zero tokens."""
service = ClaudeService(
api_key="test-key", model="claude-sonnet-4", max_tokens=4096, temperature=0.7
)
cost = service._calculate_cost(0)
assert cost == 0.0
def test_calculate_cost_1000_tokens(self):
"""Test cost calculation with 1000 tokens."""
service = ClaudeService(
api_key="test-key", model="claude-sonnet-4", max_tokens=4096, temperature=0.7
)
cost = service._calculate_cost(1000)
# Expected: (1000 / 1000) * 0.016 = 0.016
assert cost == pytest.approx(0.016, rel=1e-3)
def test_calculate_cost_5000_tokens(self):
"""Test cost calculation with 5000 tokens."""
service = ClaudeService(
api_key="test-key", model="claude-sonnet-4", max_tokens=4096, temperature=0.7
)
cost = service._calculate_cost(5000)
# Expected: (5000 / 1000) * 0.016 = 0.08
assert cost == pytest.approx(0.08, rel=1e-3)
class TestParseOutput:
"""Test _parse_output method."""
def test_parse_output_json_format(self):
"""Test parsing JSON output from Claude."""
service = ClaudeService(
api_key="test-key", model="claude-sonnet-4", max_tokens=4096, temperature=0.7
)
result = subprocess.CompletedProcess(
args=["claude"],
returncode=0,
stdout=json.dumps(
{
"output": "Task completed successfully",
"usage": {"total_tokens": 1234},
}
),
stderr="",
)
from datetime import datetime, timezone
start_time = datetime.now(timezone.utc)
parsed = service._parse_output(result, start_time, "/tmp/test.log")
assert parsed["success"] is True
assert parsed["output"] == "Task completed successfully"
assert parsed["tokens_used"] == 1234
assert parsed["cost"] == pytest.approx(0.01974, rel=1e-3)
assert parsed["log_file"] == "/tmp/test.log"
assert parsed["return_code"] == 0
def test_parse_output_plain_text(self):
"""Test parsing plain text output."""
service = ClaudeService(
api_key="test-key", model="claude-sonnet-4", max_tokens=4096, temperature=0.7
)
result = subprocess.CompletedProcess(
args=["claude"],
returncode=0,
stdout="Task completed. Total tokens: 2500",
stderr="",
)
from datetime import datetime, timezone
start_time = datetime.now(timezone.utc)
parsed = service._parse_output(result, start_time, "/tmp/test.log")
assert parsed["success"] is True
assert "Task completed" in parsed["output"]
assert parsed["tokens_used"] == 2500
assert parsed["cost"] > 0
def test_parse_output_failure(self):
"""Test parsing output from failed execution."""
service = ClaudeService(
api_key="test-key", model="claude-sonnet-4", max_tokens=4096, temperature=0.7
)
result = subprocess.CompletedProcess(
args=["claude"],
returncode=1,
stdout="",
stderr="Error: API key invalid",
)
from datetime import datetime, timezone
start_time = datetime.now(timezone.utc)
parsed = service._parse_output(result, start_time, "/tmp/test.log")
assert parsed["success"] is False
assert parsed["error"] == "Error: API key invalid"
assert parsed["return_code"] == 1
class TestWriteLog:
"""Test _write_log method."""
def test_write_log(self, tmp_path):
"""Test writing execution log to file."""
service = ClaudeService(
api_key="test-key", model="claude-sonnet-4", max_tokens=4096, temperature=0.7
)
log_file = tmp_path / "test.log"
command = ["claude", "-p", "Test prompt"]
result = subprocess.CompletedProcess(
args=command,
returncode=0,
stdout="Success output",
stderr="",
)
from datetime import datetime, timezone
start_time = datetime.now(timezone.utc)
service._write_log(log_file, command, result, start_time)
assert log_file.exists()
content = log_file.read_text()
assert "Claude Code CLI Execution Log" in content
assert "Return Code: 0" in content
assert "claude -p Test prompt" in content
assert "Success output" in content
class TestExecuteClaude:
"""Test execute_claude method."""
def test_execute_claude_success(self, tmp_path):
"""Test successful Claude execution."""
service = ClaudeService(
api_key="test-key",
model="claude-sonnet-4",
max_tokens=4096,
temperature=0.7,
timeout=10,
)
with patch("subprocess.run") as mock_run:
mock_run.return_value = subprocess.CompletedProcess(
args=["claude"],
returncode=0,
stdout=json.dumps(
{
"output": "Implementation complete",
"usage": {"total_tokens": 1500},
}
),
stderr="",
)
with patch.object(service, "_write_log"):
result = service.execute_claude(
prompt="Add health system",
working_directory=tmp_path,
project_id="test-proj",
task_id=42,
)
assert result["success"] is True
assert result["output"] == "Implementation complete"
assert result["tokens_used"] == 1500
assert result["cost"] > 0
assert mock_run.called
def test_execute_claude_with_error_context(self, tmp_path):
"""Test Claude execution with error context (retry)."""
service = ClaudeService(
api_key="test-key", model="claude-sonnet-4", max_tokens=4096, temperature=0.7
)
with patch("subprocess.run") as mock_run:
mock_run.return_value = subprocess.CompletedProcess(
args=["claude"],
returncode=0,
stdout=json.dumps({"output": "Fixed", "usage": {"total_tokens": 800}}),
stderr="",
)
with patch.object(service, "_write_log"):
result = service.execute_claude(
prompt="Add health system",
working_directory=tmp_path,
error_context="TypeError: previous error",
)
# Verify prompt includes error context
call_args = mock_run.call_args
executed_command = call_args[0][0]
prompt_index = executed_command.index("-p") + 1
executed_prompt = executed_command[prompt_index]
assert "Previous Attempt Error" in executed_prompt
assert "TypeError: previous error" in executed_prompt
def test_execute_claude_timeout(self, tmp_path):
"""Test Claude execution timeout."""
service = ClaudeService(
api_key="test-key",
model="claude-sonnet-4",
max_tokens=4096,
temperature=0.7,
timeout=1,
)
with patch("subprocess.run") as mock_run:
mock_run.side_effect = subprocess.TimeoutExpired(cmd="claude", timeout=1)
with pytest.raises(ClaudeTimeoutError):
service.execute_claude(
prompt="Add health system",
working_directory=tmp_path,
)
def test_execute_claude_failure(self, tmp_path):
"""Test Claude execution returning non-zero exit code."""
service = ClaudeService(
api_key="test-key", model="claude-sonnet-4", max_tokens=4096, temperature=0.7
)
with patch("subprocess.run") as mock_run:
mock_run.return_value = subprocess.CompletedProcess(
args=["claude"],
returncode=1,
stdout="",
stderr="API error: Rate limit exceeded",
)
with patch.object(service, "_write_log"):
result = service.execute_claude(
prompt="Add health system",
working_directory=tmp_path,
)
assert result["success"] is False
assert "Rate limit exceeded" in result["error"]
def test_execute_claude_creates_log_directory(self, tmp_path):
"""Test that execute_claude creates log directory if needed."""
service = ClaudeService(
api_key="test-key", model="claude-sonnet-4", max_tokens=4096, temperature=0.7
)
with patch("subprocess.run") as mock_run:
mock_run.return_value = subprocess.CompletedProcess(
args=["claude"],
returncode=0,
stdout=json.dumps({"output": "Done", "usage": {"total_tokens": 500}}),
stderr="",
)
# Use a custom log file in tmp_path to avoid using default /tmp/lazy-bird-logs
log_file = tmp_path / "logs" / "claude" / "test.log"
with patch("builtins.open", mock_open()):
result = service.execute_claude(
prompt="Test",
working_directory=tmp_path,
project_id="proj",
task_id=1,
log_file=log_file,
)
# Verify result is successful
assert result["success"] is True
class TestClaudeServiceErrors:
"""Test ClaudeService exception classes."""
def test_claude_execution_error(self):
"""Test ClaudeExecutionError exception."""
error = ClaudeExecutionError(
command="claude -p 'test'",
return_code=1,
stderr="API error",
stdout="partial output",
)
assert error.command == "claude -p 'test'"
assert error.return_code == 1
assert error.stderr == "API error"
assert error.stdout == "partial output"
assert "claude -p 'test'" in str(error)
assert "API error" in str(error)
def test_claude_timeout_error(self):
"""Test ClaudeTimeoutError exception."""
error = ClaudeTimeoutError("Execution timed out after 600s")
assert "timed out" in str(error)
assert isinstance(error, ClaudeServiceError)
def test_claude_service_error(self):
"""Test base ClaudeServiceError."""
error = ClaudeServiceError("Something went wrong")
assert "Something went wrong" in str(error)
| """Unit tests for ClaudeService.
Tests Claude Code CLI execution, output parsing, token tracking, and error handling.
"""
import json
import subprocess
from pathlib import Path
from unittest.mock import MagicMock, Mock, patch, mock_open
import pytest
from lazy_bird.services.claude_service import (
ClaudeExecutionError,
ClaudeService,
ClaudeServiceError,
ClaudeTimeoutError,
)
class TestClaudeServiceInit:
"""Test ClaudeService initialization."""
def test_init_with_defaults(self):
"""Test ClaudeService initialization with default settings."""
with patch("lazy_bird.services.claude_service._get_settings") as mock_get_settings:
mock_settings = Mock()
mock_settings.CLAUDE_API_KEY = "test-key"
mock_settings.CLAUDE_MODEL = "claude-sonnet-4"
mock_settings.CLAUDE_MAX_TOKENS = 4096
mock_settings.CLAUDE_TEMPERATURE = 0.7
mock_get_settings.return_value = mock_settings
service = ClaudeService()
assert service.api_key == "test-key"
assert service.model == "claude-sonnet-4"
assert service.max_tokens == 4096
assert service.temperature == 0.7
assert service.timeout == 600
def test_init_with_custom_settings(self): | [
"# yusufkaraaslan/lazy-bird:lazy_bird/services/claude_service.py\nClaudeExecutionError"
] | yusufkaraaslan/lazy-bird | tests/unit/test_claude_service.py |
"""Unit tests for Celery application configuration.
Tests Celery app initialization, configuration, and basic functionality.
"""
import pytest
from celery import Celery
from lazy_bird.tasks import app, debug_task
class TestCeleryApp:
"""Test Celery application initialization and configuration."""
def test_celery_app_instance(self):
"""Test that Celery app is properly instantiated."""
assert isinstance(app, Celery)
assert app.main == "lazy_bird"
def test_celery_app_config_loaded(self):
"""Test that Celery configuration is loaded."""
# Check broker URL is set
assert app.conf.broker_url is not None
assert "redis://" in app.conf.broker_url
# Check result backend is set
assert app.conf.result_backend is not None
assert "redis://" in app.conf.result_backend
def test_celery_serializer_config(self):
"""Test task serialization configuration."""
assert app.conf.task_serializer == "json"
assert app.conf.result_serializer == "json"
assert "json" in app.conf.accept_content
def test_celery_task_time_limits(self):
"""Test task time limit configuration."""
assert app.conf.task_soft_time_limit == 3600 # 1 hour
assert app.conf.task_time_limit == 3900 # 65 minutes
def test_celery_worker_settings(self):
"""Test worker configuration."""
assert app.conf.worker_prefetch_multiplier == 1
assert app.conf.task_acks_late is True
assert app.conf.task_reject_on_worker_lost is True
def test_celery_queues_configured(self):
"""Test that task queues are configured."""
queues = app.conf.task_queues
assert queues is not None
assert len(queues) == 3 # default, high_priority, low_priority
queue_names = [q.name for q in queues]
assert "default" in queue_names
assert "high_priority" in queue_names
assert "low_priority" in queue_names
def test_celery_task_routes_configured(self):
"""Test that task routing is configured."""
routes = app.conf.task_routes
assert routes is not None
assert isinstance(routes, dict)
# Check some expected routes exist
assert "lazy_bird.tasks.queue_processor.process_queue" in routes
def test_celery_beat_schedule_configured(self):
"""Test that beat schedule is configured."""
schedule = app.conf.beat_schedule
assert schedule is not None
assert isinstance(schedule, dict)
# Check periodic tasks are scheduled
assert "process-queue-every-60-seconds" in schedule
assert schedule["process-queue-every-60-seconds"]["schedule"] == 60.0
assert "cleanup-old-worktrees-daily" in schedule
assert schedule["cleanup-old-worktrees-daily"]["schedule"] == 86400.0
def test_celery_timezone_utc(self):
"""Test that timezone is set to UTC."""
assert app.conf.timezone == "UTC"
assert app.conf.enable_utc is True
def test_celery_result_settings(self):
"""Test result storage settings."""
assert app.conf.result_expires == 3600 # 1 hour
assert app.conf.result_extended is True
def test_celery_monitoring_enabled(self):
"""Test that monitoring settings are enabled."""
assert app.conf.worker_send_task_events is True
assert app.conf.task_send_sent_event is True
assert app.conf.task_track_started is True
class TestDebugTask:
"""Test debug task functionality."""
def test_debug_task_exists(self):
"""Test that debug task is registered."""
assert debug_task is not None
assert hasattr(debug_task, "delay")
assert hasattr(debug_task, "apply_async")
def test_debug_task_eager_mode(self):
"""Test debug task in eager mode (immediate execution)."""
# Configure eager mode for this test
app.conf.task_always_eager = True
app.conf.task_eager_propagates = True
# Execute task
result = debug_task.delay()
# Check result
assert result is not None
task_result = result.get()
assert task_result["status"] == "ok"
assert "celery_version" in task_result
def test_debug_task_signature(self):
"""Test creating task signature."""
signature = debug_task.s()
assert signature is not None
assert signature.task == debug_task.name
class TestCeleryQueuePriorities:
"""Test queue priority configuration."""
def test_default_queue_has_priority(self):
"""Test that default queue supports priorities."""
queues = {q.name: q for q in app.conf.task_queues}
default_queue = queues["default"]
assert default_queue.queue_arguments is not None
assert "x-max-priority" in default_queue.queue_arguments
assert default_queue.queue_arguments["x-max-priority"] == 10
def test_high_priority_queue_configured(self):
"""Test that high priority queue exists."""
queues = {q.name: q for q in app.conf.task_queues}
high_priority_queue = queues["high_priority"]
assert high_priority_queue is not None
assert high_priority_queue.routing_key == "high.#"
def test_low_priority_queue_configured(self):
"""Test that low priority queue exists."""
queues = {q.name: q for q in app.conf.task_queues}
low_priority_queue = queues["low_priority"]
assert low_priority_queue is not None
assert low_priority_queue.routing_key == "low.#"
class TestCeleryRetrySettings:
"""Test task retry configuration."""
def test_task_retry_enabled(self):
"""Test that task auto-retry is enabled."""
assert app.conf.task_autoretry_for == (Exception,)
def test_task_retry_max_retries(self):
"""Test max retries configuration."""
retry_kwargs = app.conf.task_retry_kwargs
assert retry_kwargs is not None
assert retry_kwargs["max_retries"] == 3
def test_task_retry_delay(self):
"""Test retry delay configuration."""
assert app.conf.task_default_retry_delay == 60 # 60 seconds
| """Unit tests for Celery application configuration.
Tests Celery app initialization, configuration, and basic functionality.
"""
import pytest
from celery import Celery
from lazy_bird.tasks import app, debug_task
class TestCeleryApp:
"""Test Celery application initialization and configuration."""
def test_celery_app_instance(self):
"""Test that Celery app is properly instantiated."""
assert isinstance(app, Celery)
assert app.main == "lazy_bird"
def test_celery_app_config_loaded(self):
"""Test that Celery configuration is loaded."""
# Check broker URL is set
assert app.conf.broker_url is not None
assert "redis://" in app.conf.broker_url
# Check result backend is set
assert app.conf.result_backend is not None
assert "redis://" in app.conf.result_backend
def test_celery_serializer_config(self):
"""Test task serialization configuration."""
assert app.conf.task_serializer == "json"
assert app.conf.result_serializer == "json"
assert "json" in app.conf.accept_content
def test_celery_task_time_limits(self):
"""Test task time limit configuration."""
assert app.conf.task_soft_time_limit == 3600 # 1 hour
assert app.conf.task_time_limit == 3900 # 65 minutes
def test_celery_worker_settings(self):
"""Test worker configuration."""
| [
"# celery/celery:celery/app/base.py\nCelery"
] | yusufkaraaslan/lazy-bird | tests/unit/test_celery_app.py |
"""
Unit tests for scripts/agent-runner.sh workflow functions
Tests critical functions in the agent-runner script including:
- Error parsing
- Retry logic
- Cleanup functions
- Error context passing
NOTE: These tests are for v1.1 bash-based architecture.
They are skipped during v2.0 refactor (FastAPI/Python architecture).
Will be reimplemented as Python unit tests once v2.0 task execution is complete.
"""
import pytest
import subprocess
import tempfile
import json
from pathlib import Path
import shutil
# Compute repo root relative to this test file
REPO_ROOT = str(Path(__file__).resolve().parent.parent.parent)
class TestParseTestErrors:
"""Test the parse_test_errors function for different project types"""
def setup_method(self):
"""Create temporary log directory for tests"""
self.temp_dir = tempfile.mkdtemp()
self.log_dir = Path(self.temp_dir) / "logs"
self.log_dir.mkdir()
def teardown_method(self):
"""Clean up temporary directory"""
shutil.rmtree(self.temp_dir, ignore_errors=True)
def test_parse_godot_test_errors(self):
"""Test parsing Godot/gdUnit4 test errors"""
# Create mock Godot test output
test_output = """
Tests: 15 | Passed: 13 | Failed: 2 | Errors: 0
FAILED: test_player_jump
Expected: 100
Got: 50
at res://test/test_player.gd:42
FAILED: test_player_velocity
Assertion failed: velocity.y > 0
at res://test/test_player.gd:67
"""
test_log = self.log_dir / "test-output.log"
test_log.write_text(test_output)
# Source the parse_test_errors function and call it
result = subprocess.run(
[
"bash",
"-c",
f"""
LOG_DIR="{self.log_dir}"
PROJECT_TYPE="godot"
# Source parse_test_errors function
source <(grep -A 100 "^parse_test_errors()" {REPO_ROOT}/scripts/agent-runner.sh | sed '/^}}/q')
parse_test_errors
""",
],
capture_output=True,
text=True,
)
assert result.returncode == 0
output = result.stdout
# Verify summary includes test stats
assert "Tests:" in output or "FAILED:" in output
assert "**Test Output Summary:**" in output
def test_parse_python_test_errors(self):
"""Test parsing Python/pytest test errors"""
test_output = """
============================ FAILURES ============================
_______________ test_user_creation _______________
def test_user_creation():
> assert user.email == "test@example.com"
E AssertionError: assert 'wrong@example.com' == 'test@example.com'
app/tests/test_models.py:42: AssertionError
============================ short test summary ==========================
FAILED app/tests/test_models.py::test_user_creation - AssertionError
"""
test_log = self.log_dir / "test-output.log"
test_log.write_text(test_output)
result = subprocess.run(
[
"bash",
"-c",
f"""
LOG_DIR="{self.log_dir}"
PROJECT_TYPE="python"
# Source function
source <(grep -A 100 "^parse_test_errors()" {REPO_ROOT}/scripts/agent-runner.sh | sed '/^}}/q')
parse_test_errors
""",
],
capture_output=True,
text=True,
)
assert result.returncode == 0
output = result.stdout
assert "FAILED" in output or "AssertionError" in output
def test_parse_rust_test_errors(self):
"""Test parsing Rust test errors"""
test_output = """
test result: FAILED. 3 passed; 1 failed; 0 ignored
failures:
---- tests::test_addition stdout ----
thread 'tests::test_addition' panicked at 'assertion failed: `(left == right)`
left: `4`,
right: `5`', src/lib.rs:12:9
"""
test_log = self.log_dir / "test-output.log"
test_log.write_text(test_output)
result = subprocess.run(
[
"bash",
"-c",
f"""
LOG_DIR="{self.log_dir}"
PROJECT_TYPE="rust"
source <(grep -A 100 "^parse_test_errors()" {REPO_ROOT}/scripts/agent-runner.sh | sed '/^}}/q')
parse_test_errors
""",
],
capture_output=True,
text=True,
)
assert result.returncode == 0
output = result.stdout
assert "test result:" in output or "failures:" in output
def test_parse_errors_no_log_file(self):
"""Test parse_test_errors when log file doesn't exist"""
result = subprocess.run(
[
"bash",
"-c",
f"""
LOG_DIR="{self.log_dir}"
PROJECT_TYPE="godot"
source <(grep -A 100 "^parse_test_errors()" {REPO_ROOT}/scripts/agent-runner.sh | sed '/^}}/q')
parse_test_errors
""",
],
capture_output=True,
text=True,
)
assert result.returncode == 0
assert "No test output available" in result.stdout
class TestRetryBackoff:
"""Test retry backoff calculation"""
def test_exponential_backoff_calculation(self):
"""Test that backoff time increases with attempt number"""
backoff_base = 30 # Base backoff seconds
# Calculate backoff for different attempts
expected_backoffs = {
1: 30, # attempt 1: 30 * 1 = 30s
2: 60, # attempt 2: 30 * 2 = 60s
3: 90, # attempt 3: 30 * 3 = 90s
}
for attempt, expected_sleep in expected_backoffs.items():
result = subprocess.run(
["bash", "-c", f"echo $((30 * {attempt}))"], capture_output=True, text=True
)
actual_sleep = int(result.stdout.strip())
assert (
actual_sleep == expected_sleep
), f"Attempt {attempt}: expected {expected_sleep}s, got {actual_sleep}s"
def test_total_attempts_calculation(self):
"""Test that TOTAL_ATTEMPTS = MAX_RETRY_ATTEMPTS + 1"""
max_retries = 3
result = subprocess.run(
["bash", "-c", f"echo $(({max_retries} + 1))"], capture_output=True, text=True
)
total_attempts = int(result.stdout.strip())
assert total_attempts == 4, "3 retries should equal 4 total attempts"
@pytest.mark.skip(reason="v1.1 bash script test - will be reimplemented in v2.0")
class TestErrorContextPassing:
"""Test that error context is properly passed to run_claude function"""
def test_run_claude_accepts_error_context_parameter(self):
"""Test that run_claude function accepts error context parameter"""
# Extract the run_claude function signature
result = subprocess.run(
["bash", "-c", "grep -A 5 '^run_claude()' {REPO_ROOT}/scripts/agent-runner.sh"],
capture_output=True,
text=True,
)
assert result.returncode == 0
function_code = result.stdout
# Verify it accepts error_context parameter
assert (
'local error_context="${1:-}"' in function_code
), "run_claude should accept error_context parameter"
def test_error_context_appended_to_prompt(self):
"""Test that error context is appended to Claude prompt"""
result = subprocess.run(
[
"bash",
"-c",
"grep -A 20 'if \\[ -n \"\\$error_context\" \\]' {REPO_ROOT}/scripts/agent-runner.sh",
],
capture_output=True,
text=True,
)
assert result.returncode == 0
conditional_code = result.stdout
# Verify error context is added to prompt
assert "PREVIOUS ATTEMPT FAILED" in conditional_code
assert "$error_context" in conditional_code
def test_retry_loop_passes_error_details(self):
"""Test that retry loop passes error_details to run_claude"""
result = subprocess.run(
[
"bash",
"-c",
"""
grep -B 5 -A 5 'run_claude "\\$error_details"' {REPO_ROOT}/scripts/agent-runner.sh
""",
],
capture_output=True,
text=True,
)
assert result.returncode == 0
retry_code = result.stdout
# Verify error_details are parsed and passed
assert "error_details=$(parse_test_errors)" in retry_code or "error_details=" in retry_code
assert 'run_claude "$error_details"' in retry_code
@pytest.mark.skip(reason="v1.1 bash script test - will be reimplemented in v2.0")
class TestCleanupWorktree:
"""Test the cleanup_worktree function"""
def test_cleanup_worktree_function_exists(self):
"""Test that cleanup_worktree function is defined"""
result = subprocess.run(
["bash", "-c", "grep -c '^cleanup_worktree()' {REPO_ROOT}/scripts/agent-runner.sh"],
capture_output=True,
text=True,
)
assert result.returncode == 0
count = int(result.stdout.strip())
assert count == 1, "cleanup_worktree function should be defined once"
def test_cleanup_removes_worktree(self):
"""Test that cleanup removes git worktree"""
result = subprocess.run(
[
"bash",
"-c",
"grep 'git worktree remove' {REPO_ROOT}/scripts/agent-runner.sh | grep cleanup_worktree -A 20",
],
capture_output=True,
text=True,
)
assert result.returncode == 0
assert "git worktree remove" in result.stdout
assert "git worktree prune" in result.stdout
def test_cleanup_deletes_branch(self):
"""Test that cleanup deletes local branch"""
result = subprocess.run(
["bash", "-c", "grep -A 30 '^cleanup_worktree()' {REPO_ROOT}/scripts/agent-runner.sh"],
capture_output=True,
text=True,
)
assert result.returncode == 0
assert "git branch -D" in result.stdout, "cleanup should delete local branch"
def test_cleanup_registered_as_exit_trap(self):
"""Test that cleanup_worktree is registered as EXIT trap"""
result = subprocess.run(
["bash", "-c", "grep 'trap cleanup_worktree EXIT' {REPO_ROOT}/scripts/agent-runner.sh"],
capture_output=True,
text=True,
)
assert result.returncode == 0
assert "trap cleanup_worktree EXIT" in result.stdout
@pytest.mark.skip(reason="v1.1 bash script test - will be reimplemented in v2.0")
class TestWorkflowIntegrity:
"""Test overall workflow integrity"""
def test_script_has_shebang(self):
"""Test that agent-runner.sh has proper shebang"""
with open(f"{REPO_ROOT}/scripts/agent-runner.sh", "r") as f:
first_line = f.readline()
assert first_line.startswith("#!"), "Script should have shebang"
assert "bash" in first_line, "Script should use bash"
def test_script_has_error_handling(self):
"""Test that script has proper error handling (set -e)"""
with open(f"{REPO_ROOT}/scripts/agent-runner.sh", "r") as f:
content = f.read()
assert "set -" in content, "Script should have error handling"
def test_retry_loop_has_err_trap_disabled(self):
"""Test that ERR trap is disabled during retry loop"""
result = subprocess.run(
[
"bash",
"-c",
"grep -B 2 'for attempt in' {REPO_ROOT}/scripts/agent-runner.sh | grep 'trap - ERR'",
],
capture_output=True,
text=True,
)
# Should find trap - ERR before the retry loop
assert result.returncode == 0 or "trap - ERR" in result.stdout
def test_all_critical_functions_exist(self):
"""Test that all critical workflow functions are defined"""
critical_functions = [
"parse_test_errors",
"run_claude",
"cleanup_worktree",
"create_worktree",
"run_tests",
"commit_changes",
"push_branch",
]
for func_name in critical_functions:
result = subprocess.run(
[
"bash",
"-c",
f"grep -c '^{func_name}()' {REPO_ROOT}/scripts/agent-runner.sh || echo 0",
],
capture_output=True,
text=True,
)
count = int(result.stdout.strip())
assert count >= 1, f"Critical function {func_name} should be defined"
@pytest.mark.skip(reason="v1.1 bash script test - will be reimplemented in v2.0")
class TestWebUIIntegration:
"""Test Web UI cache deletion integration"""
def test_queue_service_has_remove_from_cache(self):
"""Test that queue_service.py has _remove_from_processed_cache method"""
result = subprocess.run(
[
"bash",
"-c",
"grep -c 'def _remove_from_processed_cache' {REPO_ROOT}/web/backend/services/queue_service.py",
],
capture_output=True,
text=True,
)
assert result.returncode == 0
count = int(result.stdout.strip())
assert count == 1, "_remove_from_processed_cache method should exist"
def test_delete_task_calls_cache_removal(self):
"""Test that delete_task calls _remove_from_processed_cache"""
result = subprocess.run(
[
"bash",
"-c",
"grep -A 20 'def delete_task' {REPO_ROOT}/web/backend/services/queue_service.py | grep '_remove_from_processed_cache'",
],
capture_output=True,
text=True,
)
assert result.returncode == 0
assert "_remove_from_processed_cache" in result.stdout
| """
Unit tests for scripts/agent-runner.sh workflow functions
Tests critical functions in the agent-runner script including:
- Error parsing
- Retry logic
- Cleanup functions
- Error context passing
NOTE: These tests are for v1.1 bash-based architecture.
They are skipped during v2.0 refactor (FastAPI/Python architecture).
Will be reimplemented as Python unit tests once v2.0 task execution is complete.
"""
import pytest
import subprocess
import tempfile
import json
from pathlib import Path
import shutil
# Compute repo root relative to this test file
REPO_ROOT = str(Path(__file__).resolve().parent.parent.parent)
class TestParseTestErrors:
"""Test the parse_test_errors function for different project types"""
def setup_method(self):
"""Create temporary log directory for tests"""
self.temp_dir = tempfile.mkdtemp()
self.log_dir = Path(self.temp_dir) / "logs"
self.log_dir.mkdir()
def teardown_method(self):
"""Clean up temporary directory"""
shutil.rmtree(self.temp_dir, ignore_errors=True)
def test_parse_godot_test_errors(self):
"""Test parsing Godot/gdUnit4 test errors"""
# Create mock Godot test output
test_output = """
Tests: 15 | Passed: 13 | Failed: 2 | Errors: 0
FAILED: test_player_jump
Expected: 100
Got: 50
at res://test/test_player.gd:42
FAILED: test_player_velocity
Assertion failed: velocity.y > 0
at res://test/test_player.gd:67
"""
| [] | yusufkaraaslan/lazy-bird | tests/unit/test_agent_runner.py |
"""Unit tests for lazy-bird modules"""
| """Unit tests for lazy-bird modules"""
| [] | yusufkaraaslan/lazy-bird | tests/unit/__init__.py |
"""Security audit tests for lazy-bird API (Issue #118).
Tests comprehensive security features:
- Authentication bypass attempt detection
- SQL injection protection
- XSS protection via security headers
- Secrets handling validation
- Rate limiting enforcement
All tests verify the security baseline documented in Docs/Design/security-baseline.md
"""
import json
import time
from datetime import datetime, timedelta
from pathlib import Path
from unittest.mock import AsyncMock, Mock, patch
from uuid import uuid4
import pytest
from fastapi import HTTPException, status
from sqlalchemy import text
class TestAuthenticationBypassAttempts:
"""Test detection and prevention of authentication bypass attempts."""
@pytest.mark.asyncio
async def test_missing_api_key_rejected(self):
"""Test that requests without API key are rejected with 401."""
from lazy_bird.api.dependencies import get_current_api_key
# Mock dependencies
mock_db = AsyncMock()
# Attempt without API key (None)
with pytest.raises(HTTPException) as exc_info:
await get_current_api_key(api_key=None, db=mock_db)
# Verify 401 response
assert exc_info.value.status_code == status.HTTP_401_UNAUTHORIZED
assert "API key required" in exc_info.value.detail
@pytest.mark.asyncio
async def test_invalid_api_key_rejected(self):
"""Test that invalid API key is rejected with 401."""
from lazy_bird.api.dependencies import get_current_api_key
# Mock database with no matching API key
class MockResult:
def scalar_one_or_none(self):
return None
mock_db = AsyncMock()
mock_db.execute = AsyncMock(return_value=MockResult())
# Attempt with invalid API key
with pytest.raises(HTTPException) as exc_info:
await get_current_api_key(api_key="lb_invalid_key_12345", db=mock_db)
# Verify 401 response
assert exc_info.value.status_code == status.HTTP_401_UNAUTHORIZED
assert "Invalid API key" in exc_info.value.detail
@pytest.mark.asyncio
async def test_expired_api_key_rejected(self):
"""Test that expired API key is rejected with 403."""
from lazy_bird.api.dependencies import get_current_api_key
# Mock expired API key
mock_api_key = Mock()
mock_api_key.is_active = True
mock_api_key.expires_at = datetime.utcnow() - timedelta(days=1) # Expired yesterday
mock_api_key.key_prefix = "lb_test"
class MockResult:
def scalar_one_or_none(self):
return mock_api_key
mock_db = AsyncMock()
mock_db.execute = AsyncMock(return_value=MockResult())
# Attempt with expired API key
with pytest.raises(HTTPException) as exc_info:
await get_current_api_key(api_key="lb_valid_but_expired", db=mock_db)
# Verify 403 response
assert exc_info.value.status_code == status.HTTP_403_FORBIDDEN
assert "expired" in exc_info.value.detail.lower()
@pytest.mark.asyncio
async def test_inactive_api_key_rejected(self):
"""Test that inactive API key is not found (rejected with 401)."""
from lazy_bird.api.dependencies import get_current_api_key
# Mock database - inactive keys are filtered by query
class MockResult:
def scalar_one_or_none(self):
return None # is_active=False filtered out by WHERE clause
mock_db = AsyncMock()
mock_db.execute = AsyncMock(return_value=MockResult())
# Attempt with inactive API key
with pytest.raises(HTTPException) as exc_info:
await get_current_api_key(api_key="lb_inactive_key", db=mock_db)
# Verify 401 response (not found because filtered by is_active)
assert exc_info.value.status_code == status.HTTP_401_UNAUTHORIZED
@pytest.mark.asyncio
async def test_missing_jwt_token_rejected(self):
"""Test that requests without JWT token are rejected with 401."""
from lazy_bird.api.dependencies import get_current_user
# Attempt without token (None)
with pytest.raises(HTTPException) as exc_info:
await get_current_user(token=None)
# Verify 401 response
assert exc_info.value.status_code == status.HTTP_401_UNAUTHORIZED
assert "Authentication required" in exc_info.value.detail
@pytest.mark.asyncio
async def test_invalid_jwt_token_rejected(self):
"""Test that invalid JWT token is rejected with 401."""
from lazy_bird.api.dependencies import get_current_user
# Attempt with malformed token
with pytest.raises(HTTPException) as exc_info:
await get_current_user(token="invalid.jwt.token")
# Verify 401 response
assert exc_info.value.status_code == status.HTTP_401_UNAUTHORIZED
assert "Invalid or expired token" in exc_info.value.detail
@pytest.mark.asyncio
async def test_tampered_jwt_token_rejected(self):
"""Test that tampered JWT token is rejected."""
from lazy_bird.core.security import create_access_token
# Create valid token
data = {"sub": "user123", "email": "user@example.com"}
token = create_access_token(data)
# Tamper with token by changing payload
parts = token.split(".")
if len(parts) == 3:
# Change last character of payload
parts[1] = parts[1][:-1] + "X"
tampered_token = ".".join(parts)
# Attempt to use tampered token
from lazy_bird.api.dependencies import get_current_user
with pytest.raises(HTTPException) as exc_info:
await get_current_user(token=tampered_token)
# Verify 401 response
assert exc_info.value.status_code == status.HTTP_401_UNAUTHORIZED
@pytest.mark.asyncio
async def test_expired_jwt_token_rejected(self):
"""Test that expired JWT token is rejected."""
from lazy_bird.core.security import create_access_token
# Create token that expires immediately
data = {"sub": "user123"}
token = create_access_token(data, expires_delta=timedelta(seconds=-1))
# Wait a moment to ensure expiration
time.sleep(0.1)
# Attempt to use expired token
from lazy_bird.api.dependencies import get_current_user
with pytest.raises(HTTPException) as exc_info:
await get_current_user(token=token)
# Verify 401 response
assert exc_info.value.status_code == status.HTTP_401_UNAUTHORIZED
@pytest.mark.asyncio
async def test_insufficient_scopes_rejected(self):
"""Test that API key with insufficient scopes is rejected with 403."""
from lazy_bird.api.dependencies import RequireScopes
# Mock API key with only "read" scope
mock_api_key = Mock()
mock_api_key.scopes = ["read"]
mock_api_key.key_prefix = "lb_test"
# Require "write" or "admin" scope
require_write = RequireScopes(["write", "admin"])
# Attempt with insufficient scopes
with pytest.raises(HTTPException) as exc_info:
await require_write(api_key=mock_api_key)
# Verify 403 response
assert exc_info.value.status_code == status.HTTP_403_FORBIDDEN
assert "Insufficient permissions" in exc_info.value.detail
class TestSQLInjectionProtection:
"""Test SQL injection prevention via SQLAlchemy parameterization."""
@pytest.mark.asyncio
async def test_sqlalchemy_parameterized_queries(self):
"""Test that SQLAlchemy uses parameterized queries (not string concatenation)."""
from sqlalchemy import select
from lazy_bird.models.project import Project
# Build query with user input (should be parameterized)
malicious_input = "'; DROP TABLE projects; --"
# SQLAlchemy automatically parameterizes this
query = select(Project).where(Project.name == malicious_input)
# Verify query is safe (has parameters, not raw SQL injection)
compiled = query.compile()
# Parameters are bound separately, not concatenated
assert "DROP TABLE" not in str(compiled)
@pytest.mark.asyncio
async def test_text_query_with_parameters(self):
"""Test that raw SQL queries use parameter binding."""
# Safe: Using text() with bound parameters
safe_query = text("SELECT * FROM projects WHERE name = :name")
params = {"name": "'; DROP TABLE projects; --"}
# Verify parameters are bound separately
assert ":name" in str(safe_query)
assert "DROP TABLE" not in str(safe_query)
def test_input_sanitization_for_search(self):
"""Test that search inputs are sanitized."""
# Simulate search input sanitization
malicious_input = "<script>alert('XSS')</script>'; DROP TABLE users; --"
# Basic sanitization: escape SQL wildcards and limit length
sanitized = malicious_input.replace("%", "\\%").replace("_", "\\_")
sanitized = sanitized[:100] # Length limit
# Should not contain dangerous patterns raw
# (SQLAlchemy will parameterize further)
assert "DROP TABLE" in sanitized # Still in string
# But when passed to SQLAlchemy, it's a parameter value, not SQL
@pytest.mark.asyncio
async def test_no_string_concatenation_in_queries(self):
"""Test that queries don't use string concatenation (unsafe pattern)."""
from sqlalchemy import select
from lazy_bird.models.task_run import TaskRun
# User input
user_input = "42'; DELETE FROM task_runs WHERE '1'='1"
# Safe: Using SQLAlchemy ORM (parameterized)
query = select(TaskRun).where(TaskRun.work_item_id == user_input)
# Compile to SQL
compiled = str(query.compile(compile_kwargs={"literal_binds": False}))
# Should use parameters (:work_item_id_1), not concatenation
assert "DELETE FROM" not in compiled
assert ":work_item_id" in compiled or "?" in compiled # Parameter placeholder
@pytest.mark.asyncio
async def test_orm_prevents_sql_injection(self):
"""Test that SQLAlchemy ORM inherently prevents SQL injection."""
from sqlalchemy import select
from lazy_bird.models.project import Project
# Various SQL injection attempts
injection_attempts = [
"1' OR '1'='1",
"'; DROP TABLE projects; --",
"1; DELETE FROM projects WHERE id > 0",
"' UNION SELECT * FROM api_keys--",
]
for attempt in injection_attempts:
# ORM query with malicious input
query = select(Project).where(Project.id == attempt)
# Compile to see actual SQL
compiled = query.compile()
# Verify: All values are parameterized, not injected
sql_str = str(compiled)
# Should have parameter placeholder, not literal injection
assert "DROP TABLE" not in sql_str
assert "UNION SELECT" not in sql_str
# ORM treats input as literal value, not SQL
class TestXSSProtection:
"""Test XSS protection via security headers and input sanitization."""
@pytest.mark.asyncio
async def test_security_headers_present(self):
"""Test that security headers are added to responses."""
from lazy_bird.api.middleware import SecurityHeadersMiddleware
from fastapi import Request, Response
from unittest.mock import AsyncMock
# Create middleware
app = AsyncMock()
middleware = SecurityHeadersMiddleware(app)
# Mock request and response
request = Mock(spec=Request)
response = Response(content="test")
# Mock call_next to return response
async def mock_call_next(req):
return response
# Process through middleware
result = await middleware.dispatch(request, mock_call_next)
# Verify security headers
assert result.headers.get("X-Content-Type-Options") == "nosniff"
assert result.headers.get("X-Frame-Options") == "DENY"
assert result.headers.get("X-XSS-Protection") == "1; mode=block"
@pytest.mark.asyncio
async def test_hsts_header_in_production(self):
"""Test that HSTS header is added in production mode."""
from lazy_bird.api.middleware import SecurityHeadersMiddleware
from lazy_bird.core.config import settings
from fastapi import Request, Response
from unittest.mock import AsyncMock
# Mock production mode
with patch.object(settings, "DEBUG", False):
# Create middleware
app = AsyncMock()
middleware = SecurityHeadersMiddleware(app)
# Mock request and response
request = Mock(spec=Request)
response = Response(content="test")
async def mock_call_next(req):
return response
# Process through middleware
result = await middleware.dispatch(request, mock_call_next)
# Verify HSTS header in production
assert "Strict-Transport-Security" in result.headers
assert "max-age=" in result.headers["Strict-Transport-Security"]
def test_html_entities_in_responses(self):
"""Test that JSON responses are safe from XSS via Content-Type."""
# Simulate error message with HTML
malicious_input = "<script>alert('XSS')</script>"
# FastAPI's JSONResponse with application/json Content-Type
from fastapi.responses import JSONResponse
response = JSONResponse(status_code=400, content={"error": malicious_input})
# Verify Content-Type is application/json (prevents XSS interpretation)
assert response.media_type == "application/json"
# XSS protection comes from:
# 1. Content-Type: application/json (browser won't execute)
# 2. X-Content-Type-Options: nosniff (SecurityHeadersMiddleware)
# 3. X-XSS-Protection header (SecurityHeadersMiddleware)
# JSON preserves the string but browser won't execute due to Content-Type
# This is SAFE - JSON responses don't need HTML escaping
def test_content_type_prevents_xss(self):
"""Test that proper Content-Type prevents XSS."""
from fastapi.responses import JSONResponse
response = JSONResponse(content={"data": "value"})
# Content-Type should be application/json
assert response.media_type == "application/json"
# X-Content-Type-Options: nosniff prevents MIME sniffing
# (added by SecurityHeadersMiddleware)
class TestRateLimiting:
"""Test rate limiting enforcement via middleware."""
@pytest.mark.asyncio
async def test_rate_limit_headers_present(self):
"""Test that rate limit headers are added to responses."""
from lazy_bird.api.middleware import RateLimitMiddleware
from fastapi import Request, Response
# Create middleware with low limit for testing
app = Mock()
middleware = RateLimitMiddleware(app, requests_per_minute=10)
# Mock request
request = Mock(spec=Request)
request.client = Mock()
request.client.host = "127.0.0.1"
request.headers = {}
# Mock response
response = Response(content="test")
# Mock call_next
async def mock_call_next(req):
return response
# Mock Redis to fail (no rate limiting)
with patch("lazy_bird.core.redis.get_redis", side_effect=Exception("Redis unavailable")):
# Process through middleware
result = await middleware.dispatch(request, mock_call_next)
# Verify rate limit headers are present
assert "X-RateLimit-Limit" in result.headers
assert "X-RateLimit-Remaining" in result.headers
@pytest.mark.asyncio
async def test_rate_limit_exceeded_returns_429(self):
"""Test that exceeding rate limit returns 429 Too Many Requests."""
from lazy_bird.api.middleware import RateLimitMiddleware
from fastapi import Request
# Create middleware with limit of 2 requests per minute
app = Mock()
middleware = RateLimitMiddleware(app, requests_per_minute=2)
# Mock request
request = Mock(spec=Request)
request.client = Mock()
request.client.host = "127.0.0.1"
request.headers = {}
# Mock Redis client
mock_redis = Mock()
mock_redis.get = Mock(return_value=b"3") # Already at 3 requests (exceeded limit of 2)
mock_redis.pipeline = Mock()
# Mock call_next (should not be called if rate limited)
async def mock_call_next(req):
return Response(content="should not reach here")
# Patch get_redis
with patch("lazy_bird.core.redis.get_redis", return_value=mock_redis):
# Process through middleware
result = await middleware.dispatch(request, mock_call_next)
# Verify 429 response
assert result.status_code == status.HTTP_429_TOO_MANY_REQUESTS
# Verify retry headers
assert "Retry-After" in result.headers
assert "X-RateLimit-Limit" in result.headers
assert "X-RateLimit-Remaining" in result.headers
assert result.headers["X-RateLimit-Remaining"] == "0"
@pytest.mark.asyncio
async def test_rate_limit_per_api_key(self):
"""Test that rate limiting tracks by API key (not just IP)."""
from lazy_bird.api.middleware import RateLimitMiddleware
from fastapi import Request, Response
# Create middleware
app = Mock()
middleware = RateLimitMiddleware(app, requests_per_minute=10)
# Mock request with API key
request = Mock(spec=Request)
request.client = Mock()
request.client.host = "127.0.0.1"
request.headers = {"X-API-Key": "lb_test_api_key_12345"}
# Mock Redis
mock_redis = Mock()
mock_redis.get = Mock(return_value=None) # First request
mock_pipeline = Mock()
mock_pipeline.incr = Mock()
mock_pipeline.expire = Mock()
mock_pipeline.execute = Mock()
mock_redis.pipeline = Mock(return_value=mock_pipeline)
# Mock call_next
async def mock_call_next(req):
return Response(content="success")
# Patch get_redis
with patch("lazy_bird.core.redis.get_redis", return_value=mock_redis):
# Process through middleware
await middleware.dispatch(request, mock_call_next)
# Verify Redis key uses API key prefix (not IP)
mock_redis.get.assert_called_once()
call_args = mock_redis.get.call_args[0][0]
# Key should include API key prefix
assert "rate_limit:lb_test_api_key" in call_args
class TestSecretsHandling:
    """Test that secrets are handled securely and not leaked.

    Covers: log redaction of API keys (prefix-only), password hashing
    availability, JWT payload hygiene, secrets-directory permissions, and
    production error responses not echoing internal details.
    """

    def test_secrets_not_in_logs(self):
        """Test that API key prefix is logged, not full key."""
        from lazy_bird.core.security import get_api_key_prefix
        # Full API key (should NEVER be logged)
        full_api_key = "lb_secret_key_12345678901234567890"
        # Only prefix should be logged (first 8 characters)
        safe_prefix = get_api_key_prefix(full_api_key)
        # Verify: prefix is safe to log because it is truncated.
        assert len(safe_prefix) == 8
        assert safe_prefix == "lb_secre"
        assert safe_prefix != full_api_key
        # In production, logs should ONLY contain the prefix:
        #   logger.info(f"API key used: {get_api_key_prefix(api_key)}")
        # NOT:
        #   logger.info(f"API key: {api_key}")  # DANGEROUS!

    def test_api_key_prefix_in_logs_not_full_key(self):
        """Test that only API key prefix is logged, not full key."""
        from lazy_bird.core.security import get_api_key_prefix
        # Full API key (67 characters: "lb_" + 64 payload chars)
        full_key = "lb_" + "a" * 64
        # Get prefix for logging
        prefix = get_api_key_prefix(full_key)
        # Verify: prefix is only 8 characters — "lb_" plus five payload chars.
        assert len(prefix) == 8
        assert prefix == "lb_aaaaa"
        assert prefix != full_key

    def test_password_hashing_requirement(self):
        """Test that password hashing functions exist and are configured.

        Note: Full password hashing tests are in tests/unit/test_security.py
        This audit verifies the security functions are available.
        """
        # Verify password hashing functions exist
        from lazy_bird.core import security
        assert hasattr(security, "hash_password")
        assert hasattr(security, "verify_password")
        # Verify hashing actually works (round-trip: hash then verify).
        test_password = "test_password_123"
        hashed = security.hash_password(test_password)
        assert hashed is not None
        assert hashed != test_password  # never stored as plaintext
        assert security.verify_password(test_password, hashed)
        # Password security requirements (documented):
        # 1. MUST use bcrypt (industry standard) ✓
        # 2. NEVER store plaintext passwords ✓
        # 3. Use bcrypt's automatic salting ✓
        # 4. Use verify_password() for constant-time comparison ✓

    def test_jwt_payload_no_sensitive_data(self):
        """Test that JWT tokens don't include passwords or full API keys."""
        from lazy_bird.core.security import create_access_token, verify_token
        # Create token with user data (should NOT include password).
        user_data = {
            "sub": "user123",
            "email": "user@example.com",
            "role": "admin",
            # NO password field
        }
        token = create_access_token(user_data)
        payload = verify_token(token)
        # Verify: no sensitive fields in the decoded payload (JWTs are only
        # base64-encoded, not encrypted — anything inside is readable).
        assert "password" not in payload
        assert "api_key" not in payload
        assert "secret" not in payload

    def test_secrets_file_permissions(self):
        """Test that secrets directory would have correct permissions."""
        # This is a documentation test - verify the pattern exists
        from lazy_bird.core.config import settings
        # Secrets should be loaded from a secure location.
        # Pattern: ~/.config/lazy_birtd/secrets/
        # NOTE(review): "lazy_birtd" looks like a typo of "lazy_bird" but is
        # used consistently in this test — confirm against the deployment
        # layout before changing either occurrence.
        expected_pattern = ".config/lazy_birtd/secrets"
        # `expected_pattern` and the `settings` import are intentionally only
        # documentation here; actual file permissions are enforced by the
        # deployment, not by unit tests.
        # If the secrets directory exists locally, check its permissions.
        secrets_dir = Path.home() / ".config" / "lazy_birtd" / "secrets"
        if secrets_dir.exists():
            stat = secrets_dir.stat()
            mode = stat.st_mode & 0o777
            # Should be 700 (owner only)
            assert mode == 0o700, f"Secrets directory has insecure permissions: {oct(mode)}"

    def test_error_responses_dont_leak_internals(self):
        """Test that error responses don't leak internal details in production."""
        from lazy_bird.api.middleware import ErrorHandlingMiddleware
        from lazy_bird.core.config import settings
        from fastapi import Request
        from unittest.mock import AsyncMock
        # Force production mode (DEBUG=False) so the middleware must redact.
        with patch.object(settings, "DEBUG", False):
            # Create middleware
            app = AsyncMock()
            middleware = ErrorHandlingMiddleware(app)
            # Mock request with the attributes the middleware reads.
            request = Mock(spec=Request)
            request.state = Mock()
            request.state.request_id = "test-123"
            request.method = "GET"
            request.url = Mock()
            request.url.path = "/api/test"

            # call_next raises with a secret-looking message — none of it may
            # appear in the client-facing response.
            async def mock_call_next(req):
                raise ValueError(
                    "Internal database connection string: postgresql://user:pass@localhost/db"
                )

            # This test is sync, so drive the async dispatch with asyncio.run.
            import asyncio
            response = asyncio.run(middleware.dispatch(request, mock_call_next))
            # Decode the JSON error body.
            body = json.loads(response.body.decode())
            # Verify: generic error in production, no internal details leaked.
            assert "Internal Server Error" in body["error"]
            assert "database connection string" not in body["message"].lower()
            assert "postgresql://" not in str(body)
# Explicit public API of this test module (what `from ... import *` exposes).
# NOTE(review): keep this list in sync with the test classes defined above.
__all__ = [
    "TestAuthenticationBypassAttempts",
    "TestSQLInjectionProtection",
    "TestXSSProtection",
    "TestRateLimiting",
    "TestSecretsHandling",
]
| """Security audit tests for lazy-bird API (Issue #118).
Tests comprehensive security features:
- Authentication bypass attempt detection
- SQL injection protection
- XSS protection via security headers
- Secrets handling validation
- Rate limiting enforcement
All tests verify the security baseline documented in Docs/Design/security-baseline.md
"""
import json
import time
from datetime import datetime, timedelta
from pathlib import Path
from unittest.mock import AsyncMock, Mock, patch
from uuid import uuid4
import pytest
from fastapi import HTTPException, status
from sqlalchemy import text
class TestAuthenticationBypassAttempts:
"""Test detection and prevention of authentication bypass attempts."""
@pytest.mark.asyncio
async def test_missing_api_key_rejected(self):
"""Test that requests without API key are rejected with 401."""
from lazy_bird.api.dependencies import get_current_api_key
# Mock dependencies
mock_db = AsyncMock()
# Attempt without API key (None)
with pytest.raises(HTTPException) as exc_info:
await get_current_api_key(api_key=None, db=mock_db)
# Verify 401 response
assert exc_info.value.status_code == status.HTTP_401_UNAUTHORIZED
assert "API key required" in exc_info.value.detail
@pytest.mark.asyncio
async def test_invalid_api_key_rejected(self):
"""Test that invalid API key is rejected with 401."""
from lazy_bird.api.dependencies import get_current_api_key
# Mock database with no matching API key
| [
"# fastapi/fastapi:fastapi/exceptions.py\nHTTPException",
"# sqlalchemy/sqlalchemy:lib/sqlalchemy/sql/_elements_constructors.py\ntext"
] | yusufkaraaslan/lazy-bird | tests/security/test_security_audit.py |
"""Security tests for lazy-bird.
Tests authentication, authorization, input validation, and security features.
"""
| """Security tests for lazy-bird.
Tests authentication, authorization, input validation, and security features.
"""
| [] | yusufkaraaslan/lazy-bird | tests/security/__init__.py |
"""Performance tests for API endpoints (Issue #117).
Tests performance targets:
- Response time <200ms (p95)
- Throughput ≥20 req/s
- Performance regression detection
Note: Full endpoint tests require actual endpoints to be implemented.
This file validates performance calculation logic and establishes
performance testing patterns for future use.
See: Docs/Design/performance-targets.md for detailed analysis.
"""
from statistics import mean
import pytest
class TestPerformanceMetrics:
    """Validate the arithmetic used for performance metrics.

    These tests exercise percentile, throughput, and regression-detection
    calculations against the targets in Docs/Design/performance-targets.md.
    """

    def test_response_time_percentiles(self):
        """P50/P95/P99 of simulated response times stay under their targets."""
        response_times = [10, 15, 20, 25, 30, 50, 75, 100, 150, 180]
        ordered = sorted(response_times)

        def percentile(q):
            # Index-based percentile: same formula used throughout this suite.
            return ordered[int(len(ordered) * q)]

        p50, p95, p99 = percentile(0.50), percentile(0.95), percentile(0.99)
        assert p50 < 100, f"P50 ({p50}ms) should be <100ms"
        assert p95 < 200, f"P95 ({p95}ms) should be <200ms"
        assert p99 < 500, f"P99 ({p99}ms) should be <500ms"

    def test_throughput_calculation(self):
        """Requests-per-second derived from totals meets the 20 req/s floor."""
        total_requests, total_time_seconds = 100, 5.0
        throughput = total_requests / total_time_seconds
        assert throughput >= 20, f"Throughput {throughput:.2f} req/s should be >=20 req/s"

    def test_concurrent_performance_aggregation(self):
        """Aggregate stats over concurrent durations stay within targets."""
        durations_ms = [45, 50, 55, 60, 75, 80, 90, 100, 120, 150]
        ranked = sorted(durations_ms)
        avg_duration = mean(durations_ms)
        p95_duration = ranked[int(len(durations_ms) * 0.95)]
        max_duration = ranked[-1]
        assert avg_duration < 100, f"Average {avg_duration:.2f}ms should be <100ms"
        assert p95_duration < 200, f"P95 {p95_duration:.2f}ms should be <200ms"
        assert max_duration < 500, f"Max {max_duration:.2f}ms should be <500ms"

    def test_performance_degradation_detection(self):
        """Current P95 must stay within 10% of the baseline release's P95."""
        baseline_p95 = 180  # ms, from the previous release
        current_measurements = [50, 60, 75, 80, 90, 100, 120, 150, 170, 185]
        ranked = sorted(current_measurements)
        current_p95 = ranked[int(len(current_measurements) * 0.95)]
        # 10% tolerance before we call it a regression.
        regression_threshold = baseline_p95 * 1.1
        assert (
            current_p95 <= regression_threshold
        ), f"Performance regression: {current_p95}ms vs {baseline_p95}ms baseline"

    def test_percentile_edge_cases(self):
        """Index-based percentile behaves sanely for tiny and uniform samples."""
        # A single sample is its own percentile.
        assert sorted([100])[0] == 100
        # With two samples the 0.50 index lands on one of the two values.
        two = [50, 150]
        p50 = sorted(two)[int(len(two) * 0.50)]
        assert p50 in (50, 150)  # either is valid for the 50th percentile
        # A uniform distribution yields that value at every percentile.
        uniform = [100] * 10
        assert sorted(uniform)[int(len(uniform) * 0.95)] == 100
__all__ = ["TestPerformanceMetrics"]
| """Performance tests for API endpoints (Issue #117).
Tests performance targets:
- Response time <200ms (p95)
- Throughput ≥20 req/s
- Performance regression detection
Note: Full endpoint tests require actual endpoints to be implemented.
This file validates performance calculation logic and establishes
performance testing patterns for future use.
See: Docs/Design/performance-targets.md for detailed analysis.
"""
from statistics import mean
import pytest
class TestPerformanceMetrics:
"""Test performance metrics collection and validation."""
def test_response_time_percentiles(self):
"""Test calculating response time percentiles."""
# Simulate response times (ms)
response_times = [10, 15, 20, 25, 30, 50, 75, 100, 150, 180]
# Calculate percentiles
sorted_times = sorted(response_times)
p50 = sorted_times[int(len(sorted_times) * 0.50)]
p95 = sorted_times[int(len(sorted_times) * 0.95)]
p99 = sorted_times[int(len(sorted_times) * 0.99)]
# Verify metrics meet targets
assert p50 < 100, f"P50 ({p50}ms) should be <100ms"
assert p95 < 200, f"P95 ({p95}ms) should be <200ms"
assert p99 < 500, f"P99 ({p99}ms) should be <500ms"
def test_throughput_calculation(self):
"""Test calculating requests per second."""
total_requests = 100
total_time_seconds = 5.0
throughput = total_requests / total | [] | yusufkaraaslan/lazy-bird | tests/performance/test_api_performance.py |
"""Performance tests for lazy-bird.
Tests API response times, throughput, and database optimization.
"""
| """Performance tests for lazy-bird.
Tests API response times, throughput, and database optimization.
"""
| [] | yusufkaraaslan/lazy-bird | tests/performance/__init__.py |
"""
Integration tests for multi-step workflow execution
Tests that validate multiple workflow steps working together:
- Worktree creation → tests → cleanup
- Error parsing → retry → success
- Multi-project task isolation
- PR creation → update → merge
"""
import pytest
import subprocess
import tempfile
import json
import time
from pathlib import Path
import shutil
# Compute repo root relative to this test file
REPO_ROOT = str(Path(__file__).resolve().parent.parent.parent)
@pytest.fixture
def test_project():
    """Create a temporary test project directory.

    Yields a git repository containing a single initial commit (README.md),
    with user.name/user.email configured so commits succeed on CI machines.
    The whole temp tree is removed on teardown.
    """
    temp_dir = tempfile.mkdtemp(prefix="lazy-bird-test-")
    project_dir = Path(temp_dir) / "test-project"
    # Initialize git repo (also creates project_dir).
    subprocess.run(["git", "init", str(project_dir)], check=True, capture_output=True)
    # Identity config must precede the first commit or `git commit` fails.
    subprocess.run(
        ["git", "config", "user.email", "test@example.com"],
        cwd=project_dir,
        check=True,
        capture_output=True,
    )
    subprocess.run(
        ["git", "config", "user.name", "Test User"],
        cwd=project_dir,
        check=True,
        capture_output=True,
    )
    # Create initial commit so branches/worktrees have a base revision.
    (project_dir / "README.md").write_text("# Test Project")
    subprocess.run(["git", "add", "."], cwd=project_dir, check=True, capture_output=True)
    subprocess.run(
        ["git", "commit", "-m", "Initial commit"], cwd=project_dir, check=True, capture_output=True
    )
    yield project_dir
    # Cleanup — best-effort; leftover worktrees may hold files open.
    shutil.rmtree(temp_dir, ignore_errors=True)
@pytest.fixture
def test_logs_dir():
    """Yield a throwaway directory for log fixtures; removed after the test."""
    logs_root = tempfile.mkdtemp(prefix="lazy-bird-logs-")
    yield Path(logs_root)
    shutil.rmtree(logs_root, ignore_errors=True)
class TestWorktreeWorkflow:
    """Test complete worktree creation → work → cleanup workflow"""

    def test_worktree_creation_and_cleanup(self, test_project):
        """Test creating and cleaning up a worktree.

        Exercises the full lifecycle with raw git commands: add a worktree on
        a new branch, verify both exist, then remove the worktree and delete
        the (unpushed) branch, verifying both are gone.
        """
        # Worktree must live outside the repo working tree; use a sibling dir.
        worktree_path = test_project.parent / "test-worktree"
        branch_name = "feature-test-42"
        # Create worktree directly using git commands (-b creates the branch).
        result = subprocess.run(
            ["git", "worktree", "add", "-b", branch_name, str(worktree_path)],
            cwd=test_project,
            capture_output=True,
            text=True,
        )
        assert result.returncode == 0, f"Worktree creation failed: {result.stderr}"
        assert worktree_path.exists(), "Worktree directory should exist"
        # Verify the branch was created.
        branch_check = subprocess.run(
            ["git", "branch", "--list", branch_name],
            cwd=test_project,
            capture_output=True,
            text=True,
        )
        assert branch_name in branch_check.stdout
        # Cleanup using direct git commands (--force: worktree has no changes
        # we care about).
        cleanup_result = subprocess.run(
            ["git", "worktree", "remove", str(worktree_path), "--force"],
            cwd=test_project,
            capture_output=True,
            text=True,
        )
        assert cleanup_result.returncode == 0, f"Cleanup failed: {cleanup_result.stderr}"
        # `worktree remove` leaves the branch behind; delete it explicitly.
        subprocess.run(["git", "branch", "-D", branch_name], cwd=test_project, capture_output=True)
        assert not worktree_path.exists(), "Worktree should be removed"
        # Verify branch was deleted (since it wasn't pushed).
        branch_check_after = subprocess.run(
            ["git", "branch", "--list", branch_name],
            cwd=test_project,
            capture_output=True,
            text=True,
        )
        assert branch_name not in branch_check_after.stdout, "Branch should be deleted"
class TestErrorParsingWorkflow:
    """Test error parsing → context passing → retry workflow"""

    def test_godot_error_parsing_to_claude_context(self, test_logs_dir):
        """Test that Godot errors are parsed and formatted for Claude.

        Runs scripts/parse_test_errors.py over a canned Godot test log, in
        both human-readable and --json modes, and checks that test names,
        file:line locations, and failure counts survive parsing.
        """
        # Create mock Godot test output
        test_output = """
Tests: 10 | Passed: 8 | Failed: 2 | Errors: 0
FAILED: test_player_health
Expected: 100
Got: 50
at res://test/test_player.gd:42
FAILED: test_enemy_spawn
Assertion failed: enemy != null
at res://test/test_enemy.gd:67
"""
        test_log = test_logs_dir / "test-output.log"
        test_log.write_text(test_output)
        # Run the parser in human-readable mode.
        parser_script = f"{REPO_ROOT}/scripts/parse_test_errors.py"
        result = subprocess.run(
            ["python3", parser_script, str(test_log), "godot"], capture_output=True, text=True
        )
        assert result.returncode == 0
        output = result.stdout
        # Verify the error summary contains key information for Claude.
        assert "Test Error Summary" in output
        assert "test_player_health" in output
        assert "test_enemy.gd:67" in output
        assert "Error(s) Found" in output
        # Same log, JSON output this time.
        json_result = subprocess.run(
            ["python3", parser_script, str(test_log), "godot", "--json"],
            capture_output=True,
            text=True,
        )
        assert json_result.returncode == 0
        error_data = json.loads(json_result.stdout)
        assert error_data["framework"] == "godot"
        assert error_data["stats"]["failed"] == 2
        # The parser finds FAILED entries plus any separate assertion matches,
        # so error_count >= stats["failed"]
        assert error_data["error_count"] >= 2
        assert len(error_data["errors"]) >= 2
        # Verify specific per-error details (name, file, line).
        errors = {e["test_name"]: e for e in error_data["errors"]}
        assert "test_player_health" in errors
        assert errors["test_player_health"]["file"] == "res://test/test_player.gd"
        assert errors["test_player_health"]["line"] == 42

    def test_python_error_parsing_to_claude_context(self, test_logs_dir):
        """Test that Python/pytest errors are parsed correctly."""
        # Canned pytest failure output, including the short-summary section.
        test_output = """
======================== FAILURES ========================
____________ test_user_creation ____________
def test_user_creation():
> assert user.email == "test@example.com"
E AssertionError: assert 'wrong@example.com' == 'test@example.com'
app/tests/test_models.py:42: AssertionError
============ short test summary ============
FAILED app/tests/test_models.py::test_user_creation - AssertionError
======================== 1 failed, 5 passed in 2.5s ========================
"""
        test_log = test_logs_dir / "test-output.log"
        test_log.write_text(test_output)
        parser_script = f"{REPO_ROOT}/scripts/parse_test_errors.py"
        result = subprocess.run(
            ["python3", parser_script, str(test_log), "python"], capture_output=True, text=True
        )
        assert result.returncode == 0
        output = result.stdout
        # Test name must always appear; file:line formatting may vary.
        assert "test_user_creation" in output
        assert "test_models.py:42" in output or "test_models.py" in output
class TestMultiProjectIsolation:
    """Test that multi-project tasks are properly isolated"""

    def test_worktree_naming_includes_project_id(self, test_project):
        """Test that worktrees include project ID for isolation.

        Creates two worktrees whose directory and branch names embed distinct
        project IDs (same issue number 42), verifies they coexist, then tears
        both down.
        """
        # Naming convention under test: agent-<project>-<issue>.
        worktree1_path = test_project.parent / "agent-project1-42"
        worktree2_path = test_project.parent / "agent-project2-42"
        # Create first worktree directly using git commands.
        result1 = subprocess.run(
            ["git", "worktree", "add", "-b", "feature-project1-42", str(worktree1_path)],
            cwd=test_project,
            capture_output=True,
            text=True,
        )
        assert result1.returncode == 0, f"Worktree 1 creation failed: {result1.stderr}"
        assert worktree1_path.exists()
        # Create second worktree directly using git commands.
        result2 = subprocess.run(
            ["git", "worktree", "add", "-b", "feature-project2-42", str(worktree2_path)],
            cwd=test_project,
            capture_output=True,
            text=True,
        )
        assert result2.returncode == 0, f"Worktree 2 creation failed: {result2.stderr}"
        assert worktree2_path.exists()
        # Both worktrees should coexist — the project ID keeps names unique.
        assert worktree1_path.exists()
        assert worktree2_path.exists()
        # Cleanup: remove each worktree, then delete its branch.
        for worktree, branch in [
            (worktree1_path, "feature-project1-42"),
            (worktree2_path, "feature-project2-42"),
        ]:
            subprocess.run(
                ["git", "worktree", "remove", str(worktree), "--force"],
                cwd=test_project,
                capture_output=True,
            )
            subprocess.run(["git", "branch", "-D", branch], cwd=test_project, capture_output=True)
class TestRetryWorkflow:
"""Test retry logic with error context passing"""
def test_retry_backoff_timing(self):
"""Test that retry backoff increases correctly"""
# Test exponential backoff calculation (without actually waiting)
for attempt in range(1, 4):
backoff = 30 * attempt
expected_backoff = {1: 30, 2: 60, 3: 90}
assert backoff == expected_backoff[attempt]
def test_error_context_structure(self, test_logs_dir):
"""Test that error context has the right structure for Claude"""
test_output = """
Tests: 5 | Passed: 3 | Failed: 2 | Errors: 0
FAILED: test_combat_system
Damage calculation incorrect
at res://systems/combat.gd:123
"""
test_log = test_logs_dir / "test-output.log"
test_log.write_text(test_output)
parser_script = f"{REPO_ROOT}/scripts/parse_test_errors.py"
result = subprocess.run(
["python3", parser_script, str(test_log), "godot"], capture_output=True, text=True
)
assert result.returncode == 0
error_context = result.stdout
# Verify error context includes file paths and line numbers
assert "combat.gd:123" in error_context
assert "test_combat_system" in error_context
# This is what gets passed to Claude via run_claude "$error_context"
# Should be well-formatted and informative
assert "Error" in error_context
assert len(error_context) > 50, "Error context should be detailed"
class TestWebUIIntegration:
    """Web UI cache behaviour the workflow depends on (P0-2 regression guard)."""

    def test_task_deletion_removes_from_cache(self):
        """Deleting a task must also evict it from the processed-issues cache."""
        service_file = f"{REPO_ROOT}/web/backend/services/queue_service.py"

        def run_grep(*grep_args):
            # Shared wrapper so both greps use identical subprocess options.
            return subprocess.run(
                ["grep", *grep_args, service_file],
                capture_output=True,
                text=True,
            )

        # delete_task's body (first 40 lines) must call the eviction helper.
        deletion_source = run_grep("-A", "40", "def delete_task")
        assert deletion_source.returncode == 0
        assert "_remove_from_processed_cache" in deletion_source.stdout
        # ...and the helper itself must be defined exactly once.
        definition_count = run_grep("-c", "def _remove_from_processed_cache")
        assert definition_count.returncode == 0
        count = int(definition_count.stdout.strip())
        assert count == 1, "_remove_from_processed_cache should be defined"
# Allow running this module directly (python test_workflow_integration.py).
if __name__ == "__main__":
    pytest.main([__file__, "-v"])
| """
Integration tests for multi-step workflow execution
Tests that validate multiple workflow steps working together:
- Worktree creation → tests → cleanup
- Error parsing → retry → success
- Multi-project task isolation
- PR creation → update → merge
"""
import pytest
import subprocess
import tempfile
import json
import time
from pathlib import Path
import shutil
# Compute repo root relative to this test file
REPO_ROOT = str(Path(__file__).resolve().parent.parent.parent)
@pytest.fixture
def test_project():
"""Create a temporary test project directory"""
temp_dir = tempfile.mkdtemp(prefix="lazy-bird-test-")
project_dir = Path(temp_dir) / "test-project"
# Initialize git repo
subprocess.run(["git", "init", str(project_dir)], check=True, capture_output=True)
subprocess.run(
["git", "config", "user.email", "test@example.com"],
cwd=project_dir,
check=True,
capture_output=True,
)
subprocess.run(
["git", "config", "user.name", "Test User"],
cwd=project_dir,
check=True,
capture_output=True,
)
# Create initial commit
(project_dir / "README.md").write_text("# Test Project")
subprocess.run(["git", "add", "."], cwd=project_dir, check=True, capture_output=True)
subprocess.run(
["git", "commit", "-m", "Initial commit"], cwd=project_dir, check=True, capture_output=True
)
| [] | yusufkaraaslan/lazy-bird | tests/integration/test_workflow_integration.py |
"""Integration tests for Webhooks API endpoints.
Tests all 6 Webhook subscription CRUD endpoints including test delivery.
"""
from datetime import datetime, timezone
from unittest.mock import AsyncMock, MagicMock, patch
from uuid import uuid4
import pytest
from fastapi.testclient import TestClient
from lazy_bird.api.main import app
from lazy_bird.models.webhook_subscription import WebhookSubscription
class TestListWebhooks:
    """Test GET /api/v1/webhooks endpoint.

    NOTE(review): these tests are `async def` without @pytest.mark.asyncio —
    presumably pytest-asyncio runs in auto mode; confirm in the pytest config.
    NOTE(review): requests authenticate with `test_api_key.key_hash` as the
    raw header value — verify the fixture actually stores a usable key there.
    """

    async def test_list_webhooks_empty(self, test_client, test_api_key):
        """Test listing webhooks when none exist."""
        response = test_client.get(
            "/api/v1/webhooks",
            headers={"X-API-Key": test_api_key.key_hash},
        )
        assert response.status_code == 200
        data = response.json()
        # Empty collection still returns the pagination envelope.
        assert data["total"] == 0
        assert data["items"] == []

    async def test_list_webhooks_pagination(self, test_client, test_api_key, test_db):
        """Test webhooks pagination."""
        # Create 5 webhooks, committed in one transaction.
        for i in range(5):
            webhook = WebhookSubscription(
                url=f"https://example{i}.com/webhook",
                secret=f"secret-key-{i}-16chars",
                events=["task.completed"],
                is_active=True,
                failure_count=0,
                created_at=datetime.now(timezone.utc),
                updated_at=datetime.now(timezone.utc),
            )
            test_db.add(webhook)
        await test_db.commit()
        # Get page 1 with page_size=2
        response = test_client.get(
            "/api/v1/webhooks?page=1&page_size=2",
            headers={"X-API-Key": test_api_key.key_hash},
        )
        assert response.status_code == 200
        data = response.json()
        assert data["total"] == 5
        assert len(data["items"]) == 2
        # ceil(5 / 2) == 3 pages
        assert data["pages"] == 3

    async def test_list_webhooks_filter_by_event(self, test_client, test_api_key, test_db):
        """Test filtering webhooks by event type."""
        # Create webhooks with different events — only webhook1 has the
        # event we filter for.
        webhook1 = WebhookSubscription(
            url="https://example1.com/webhook",
            secret="secret-key-1-16chars",
            events=["task.completed", "task.failed"],
            is_active=True,
            failure_count=0,
            created_at=datetime.now(timezone.utc),
            updated_at=datetime.now(timezone.utc),
        )
        webhook2 = WebhookSubscription(
            url="https://example2.com/webhook",
            secret="secret-key-2-16chars",
            events=["pr.created", "pr.merged"],
            is_active=True,
            failure_count=0,
            created_at=datetime.now(timezone.utc),
            updated_at=datetime.now(timezone.utc),
        )
        test_db.add_all([webhook1, webhook2])
        await test_db.commit()
        # Filter for task.completed
        response = test_client.get(
            "/api/v1/webhooks?event=task.completed",
            headers={"X-API-Key": test_api_key.key_hash},
        )
        assert response.status_code == 200
        data = response.json()
        assert data["total"] == 1
        assert "task.completed" in data["items"][0]["events"]
class TestCreateWebhook:
    """Test POST /api/v1/webhooks endpoint.

    Covers the success path, project-scoped vs global subscriptions, a
    missing project (404), and schema validation failures (422).
    """

    async def test_create_webhook_success(self, test_client, test_api_key):
        """Test successful webhook subscription creation."""
        payload = {
            "url": "https://example.com/webhook",
            "secret": "my-webhook-secret-16chars-minimum",
            "events": ["task.completed", "task.failed", "pr.created"],
            "is_active": True,
            "description": "Production webhook for task events",
        }
        response = test_client.post(
            "/api/v1/webhooks",
            json=payload,
            headers={"X-API-Key": test_api_key.key_hash},
        )
        assert response.status_code == 201
        data = response.json()
        assert data["url"] == "https://example.com/webhook"
        # Compare as sets — event ordering in the response is not guaranteed.
        assert set(data["events"]) == {"task.completed", "task.failed", "pr.created"}
        assert data["is_active"] is True
        # New subscriptions start with a clean delivery-failure counter.
        assert data["failure_count"] == 0
        assert "id" in data

    async def test_create_webhook_project_scoped(self, test_client, test_api_key, test_project):
        """Test creating project-scoped webhook."""
        payload = {
            "url": "https://example.com/webhook",
            "secret": "project-webhook-secret-16chars",
            "events": ["task.completed"],
            "project_id": str(test_project.id),
        }
        response = test_client.post(
            "/api/v1/webhooks",
            json=payload,
            headers={"X-API-Key": test_api_key.key_hash},
        )
        assert response.status_code == 201
        data = response.json()
        assert data["project_id"] == str(test_project.id)

    async def test_create_webhook_global(self, test_client, test_api_key):
        """Test creating global webhook (no project_id)."""
        payload = {
            "url": "https://example.com/webhook",
            "secret": "global-webhook-secret-16chars",
            "events": ["task.completed"],
        }
        response = test_client.post(
            "/api/v1/webhooks",
            json=payload,
            headers={"X-API-Key": test_api_key.key_hash},
        )
        assert response.status_code == 201
        data = response.json()
        assert data["project_id"] is None  # Global webhook

    async def test_create_webhook_project_not_found(self, test_client, test_api_key):
        """Test creating webhook for non-existent project."""
        payload = {
            "url": "https://example.com/webhook",
            "secret": "webhook-secret-16chars",
            "events": ["task.completed"],
            # Random UUID — guaranteed not to match any seeded project.
            "project_id": str(uuid4()),
        }
        response = test_client.post(
            "/api/v1/webhooks",
            json=payload,
            headers={"X-API-Key": test_api_key.key_hash},
        )
        assert response.status_code == 404
        assert "not found" in response.json()["detail"].lower()

    async def test_create_webhook_invalid_url(self, test_client, test_api_key):
        """Test creating webhook with invalid URL."""
        payload = {
            "url": "not-a-valid-url",
            "secret": "webhook-secret-16chars",
            "events": ["task.completed"],
        }
        response = test_client.post(
            "/api/v1/webhooks",
            json=payload,
            headers={"X-API-Key": test_api_key.key_hash},
        )
        assert response.status_code == 422  # Validation error

    async def test_create_webhook_secret_too_short(self, test_client, test_api_key):
        """Test creating webhook with secret shorter than 16 chars."""
        payload = {
            "url": "https://example.com/webhook",
            "secret": "short",  # Less than 16 chars
            "events": ["task.completed"],
        }
        response = test_client.post(
            "/api/v1/webhooks",
            json=payload,
            headers={"X-API-Key": test_api_key.key_hash},
        )
        assert response.status_code == 422  # Validation error
class TestGetWebhook:
    """Test GET /api/v1/webhooks/{subscription_id} endpoint."""

    async def test_get_webhook_success(self, test_client, test_api_key, test_db):
        """Test getting single webhook subscription."""
        # Seed a subscription with non-default delivery stats so we can
        # assert they round-trip through the API.
        webhook = WebhookSubscription(
            url="https://example.com/webhook",
            secret="webhook-secret-16chars",
            events=["task.completed", "pr.created"],
            is_active=True,
            failure_count=3,
            last_triggered_at=datetime.now(timezone.utc),
            created_at=datetime.now(timezone.utc),
            updated_at=datetime.now(timezone.utc),
        )
        test_db.add(webhook)
        await test_db.commit()
        # Refresh so server-generated fields (e.g. id) are populated.
        await test_db.refresh(webhook)
        response = test_client.get(
            f"/api/v1/webhooks/{webhook.id}",
            headers={"X-API-Key": test_api_key.key_hash},
        )
        assert response.status_code == 200
        data = response.json()
        assert data["id"] == str(webhook.id)
        assert data["url"] == "https://example.com/webhook"
        assert data["failure_count"] == 3
        assert "last_triggered_at" in data

    async def test_get_webhook_not_found(self, test_client, test_api_key):
        """Test getting non-existent webhook."""
        # Random UUID — no subscription with this id exists.
        response = test_client.get(
            f"/api/v1/webhooks/{uuid4()}",
            headers={"X-API-Key": test_api_key.key_hash},
        )
        assert response.status_code == 404
        assert "not found" in response.json()["detail"].lower()
class TestUpdateWebhook:
    """Test PATCH /api/v1/webhooks/{subscription_id} endpoint."""

    async def test_update_webhook_success(self, test_client, test_api_key, test_db):
        """Test successful webhook subscription update."""
        webhook = WebhookSubscription(
            url="https://example.com/webhook",
            secret="old-webhook-secret-16chars",
            events=["task.completed"],
            is_active=True,
            failure_count=0,
            created_at=datetime.now(timezone.utc),
            updated_at=datetime.now(timezone.utc),
        )
        test_db.add(webhook)
        await test_db.commit()
        await test_db.refresh(webhook)
        # Partial update: url, events, and description (secret untouched).
        payload = {
            "url": "https://new-example.com/webhook",
            "events": ["task.completed", "task.failed", "pr.created"],
            "description": "Updated webhook description",
        }
        response = test_client.patch(
            f"/api/v1/webhooks/{webhook.id}",
            json=payload,
            headers={"X-API-Key": test_api_key.key_hash},
        )
        assert response.status_code == 200
        data = response.json()
        assert data["url"] == "https://new-example.com/webhook"
        assert len(data["events"]) == 3
        assert data["description"] == "Updated webhook description"

    async def test_update_webhook_disable(self, test_client, test_api_key, test_db):
        """Test disabling webhook subscription."""
        webhook = WebhookSubscription(
            url="https://example.com/webhook",
            secret="webhook-secret-16chars",
            events=["task.completed"],
            is_active=True,
            failure_count=0,
            created_at=datetime.now(timezone.utc),
            updated_at=datetime.now(timezone.utc),
        )
        test_db.add(webhook)
        await test_db.commit()
        await test_db.refresh(webhook)
        # Toggle the subscription off via PATCH.
        payload = {"is_active": False}
        response = test_client.patch(
            f"/api/v1/webhooks/{webhook.id}",
            json=payload,
            headers={"X-API-Key": test_api_key.key_hash},
        )
        assert response.status_code == 200
        data = response.json()
        assert data["is_active"] is False

    async def test_update_webhook_not_found(self, test_client, test_api_key):
        """Test updating non-existent webhook."""
        payload = {"is_active": False}
        response = test_client.patch(
            f"/api/v1/webhooks/{uuid4()}",
            json=payload,
            headers={"X-API-Key": test_api_key.key_hash},
        )
        assert response.status_code == 404
class TestDeleteWebhook:
    """Test DELETE /api/v1/webhooks/{subscription_id} endpoint."""

    async def test_delete_webhook_success(self, test_client, test_api_key, test_db):
        """Test successful webhook subscription deletion."""
        webhook = WebhookSubscription(
            url="https://example.com/webhook",
            secret="webhook-secret-16chars",
            events=["task.completed"],
            is_active=True,
            failure_count=0,
            created_at=datetime.now(timezone.utc),
            updated_at=datetime.now(timezone.utc),
        )
        test_db.add(webhook)
        await test_db.commit()
        await test_db.refresh(webhook)
        # Capture the id before deleting — the ORM object may expire.
        webhook_id = webhook.id
        response = test_client.delete(
            f"/api/v1/webhooks/{webhook_id}",
            headers={"X-API-Key": test_api_key.key_hash},
        )
        # DELETE returns 204 No Content on success.
        assert response.status_code == 204
        # Verify deletion by fetching the same id again.
        get_response = test_client.get(
            f"/api/v1/webhooks/{webhook_id}",
            headers={"X-API-Key": test_api_key.key_hash},
        )
        assert get_response.status_code == 404

    async def test_delete_webhook_not_found(self, test_client, test_api_key):
        """Test deleting non-existent webhook."""
        response = test_client.delete(
            f"/api/v1/webhooks/{uuid4()}",
            headers={"X-API-Key": test_api_key.key_hash},
        )
        assert response.status_code == 404
class TestWebhookTestDelivery:
"""Test POST /api/v1/webhooks/{subscription_id}/test endpoint."""
async def test_test_webhook_delivery_success(self, test_client, test_api_key, test_db):
"""Test successful test webhook delivery."""
webhook = WebhookSubscription(
url="https://example.com/webhook",
secret="webhook-secret-16chars",
events=["task.completed"],
is_active=True,
failure_count=0,
created_at=datetime.now(timezone.utc),
updated_at=datetime.now(timezone.utc),
)
test_db.add(webhook)
await test_db.commit()
await test_db.refresh(webhook)
# Mock the test delivery to succeed
with patch("lazy_bird.services.webhook_service.send_test_webhook") as mock_test:
mock_test.return_value = {
"success": True,
"status_code": 200,
"response_time_ms": 123.45,
"message": "Test webhook delivered successfully",
}
response = test_client.post(
f"/api/v1/webhooks/{webhook.id}/test",
headers={"X-API-Key": test_api_key.key_hash},
)
assert response.status_code == 200
data = response.json()
assert data["success"] is True
assert data["status_code"] == 200
assert "message" in data
async def test_test_webhook_delivery_failure(self, test_client, test_api_key, test_db):
"""Test failed test webhook delivery."""
webhook = WebhookSubscription(
url="https://example.com/webhook",
secret="webhook-secret-16chars",
events=["task.completed"],
is_active=True,
failure_count=0,
created_at=datetime.now(timezone.utc),
updated_at=datetime.now(timezone.utc),
)
test_db.add(webhook)
await test_db.commit()
await test_db.refresh(webhook)
# Mock the test delivery to fail
with patch("lazy_bird.services.webhook_service.send_test_webhook") as mock_test:
mock_test.return_value = {
"success": False,
"status_code": 500,
"error": "Internal Server Error",
"message": "Test webhook delivery failed: Internal Server Error",
}
response = test_client.post(
f"/api/v1/webhooks/{webhook.id}/test",
headers={"X-API-Key": test_api_key.key_hash},
)
assert response.status_code == 200 # Endpoint returns 200 even if delivery fails
data = response.json()
assert data["success"] is False
assert data["status_code"] == 500
async def test_test_webhook_delivery_not_found(self, test_client, test_api_key):
"""Test test delivery for non-existent webhook."""
response = test_client.post(
f"/api/v1/webhooks/{uuid4()}/test",
headers={"X-API-Key": test_api_key.key_hash},
)
assert response.status_code == 404
class TestWebhookDeliveryStatistics:
"""Test webhook delivery statistics tracking."""
async def test_webhook_tracks_last_triggered_at(self, test_client, test_api_key, test_db):
"""Test that webhook tracks last successful delivery time."""
webhook = WebhookSubscription(
url="https://example.com/webhook",
secret="webhook-secret-16chars",
events=["task.completed"],
is_active=True,
failure_count=0,
last_triggered_at=None,
created_at=datetime.now(timezone.utc),
updated_at=datetime.now(timezone.utc),
)
test_db.add(webhook)
await test_db.commit()
await test_db.refresh(webhook)
# Get webhook before any deliveries
response = test_client.get(
f"/api/v1/webhooks/{webhook.id}",
headers={"X-API-Key": test_api_key.key_hash},
)
assert response.status_code == 200
data = response.json()
assert data["last_triggered_at"] is None
assert data["failure_count"] == 0
async def test_webhook_tracks_failure_count(self, test_client, test_api_key, test_db):
"""Test that webhook tracks consecutive failures."""
webhook = WebhookSubscription(
url="https://example.com/webhook",
secret="webhook-secret-16chars",
events=["task.completed"],
is_active=True,
failure_count=5,
last_failure_at=datetime.now(timezone.utc),
created_at=datetime.now(timezone.utc),
updated_at=datetime.now(timezone.utc),
)
test_db.add(webhook)
await test_db.commit()
await test_db.refresh(webhook)
response = test_client.get(
f"/api/v1/webhooks/{webhook.id}",
headers={"X-API-Key": test_api_key.key_hash},
)
assert response.status_code == 200
data = response.json()
assert data["failure_count"] == 5
assert data["last_failure_at"] is not None
| """Integration tests for Webhooks API endpoints.
Tests all 6 Webhook subscription CRUD endpoints including test delivery.
"""
from datetime import datetime, timezone
from unittest.mock import AsyncMock, MagicMock, patch
from uuid import uuid4
import pytest
from fastapi.testclient import TestClient
from lazy_bird.api.main import app
from lazy_bird.models.webhook_subscription import WebhookSubscription
class TestListWebhooks:
"""Test GET /api/v1/webhooks endpoint."""
async def test_list_webhooks_empty(self, test_client, test_api_key):
"""Test listing webhooks when none exist."""
response = test_client.get(
"/api/v1/webhooks",
headers={"X-API-Key": test_api_key.key_hash},
)
assert response.status_code == 200
data = response.json()
assert data["total"] == 0
assert data["items"] == []
async def test_list_webhooks_pagination(self, test_client, test_api_key, test_db):
"""Test webhooks pagination."""
# Create 5 webhooks
for i in range(5):
webhook = WebhookSubscription(
url=f"https://example{i}.com/webhook",
secret=f"secret-key-{i}-16chars",
events=["task.com | [
"# yusufkaraaslan/lazy-bird:lazy_bird/models/webhook_subscription.py\nWebhookSubscription"
] | yusufkaraaslan/lazy-bird | tests/integration/test_webhooks_api.py |
"""Integration tests for TaskRuns API endpoints.
Tests all 7 TaskRun CRUD endpoints with state machine validation.
"""
from datetime import datetime, timezone
from decimal import Decimal
from uuid import uuid4
import pytest
from fastapi.testclient import TestClient
from lazy_bird.api.main import app
from lazy_bird.models.claude_account import ClaudeAccount
from lazy_bird.models.framework_preset import FrameworkPreset
from lazy_bird.models.project import Project
from lazy_bird.models.task_run import TaskRun
class TestListTaskRuns:
"""Test GET /api/v1/task-runs endpoint."""
async def test_list_task_runs_empty(self, test_client, test_api_key):
"""Test listing task runs when none exist."""
response = test_client.get(
"/api/v1/task-runs",
headers={"X-API-Key": test_api_key.key_hash},
)
assert response.status_code == 200
data = response.json()
assert data["total"] == 0
assert data["items"] == []
assert data["page"] == 1
assert data["pages"] == 0
async def test_list_task_runs_pagination(
self, test_client, test_api_key, test_db, test_project
):
"""Test task runs pagination."""
# Create 5 task runs
for i in range(5):
task_run = TaskRun(
project_id=test_project.id,
work_item_id=str(100 + i),
prompt="Test task",
status="queued",
created_at=datetime.now(timezone.utc),
updated_at=datetime.now(timezone.utc),
)
test_db.add(task_run)
await test_db.commit()
# Get page 1 with page_size=2
response = test_client.get(
"/api/v1/task-runs?page=1&page_size=2",
headers={"X-API-Key": test_api_key.key_hash},
)
assert response.status_code == 200
data = response.json()
assert data["total"] == 5
assert len(data["items"]) == 2
assert data["page"] == 1
assert data["pages"] == 3
async def test_list_task_runs_filter_by_status(
self, test_client, test_api_key, test_db, test_project
):
"""Test filtering task runs by status."""
# Create task runs with different statuses
statuses = ["queued", "running", "success", "failed"]
for status in statuses:
task_run = TaskRun(
project_id=test_project.id,
work_item_id="100",
prompt="Test task",
status=status,
created_at=datetime.now(timezone.utc),
updated_at=datetime.now(timezone.utc),
)
test_db.add(task_run)
await test_db.commit()
# Filter for success only
response = test_client.get(
"/api/v1/task-runs?status=success",
headers={"X-API-Key": test_api_key.key_hash},
)
assert response.status_code == 200
data = response.json()
assert data["total"] == 1
assert data["items"][0]["status"] == "success"
async def test_list_task_runs_filter_by_project(self, test_client, test_api_key, test_db):
"""Test filtering task runs by project_id."""
# Create two projects
project1 = Project(
name="Project 1",
slug="project-1",
project_type="game_engine",
repo_url="https://github.com/user/project1",
max_cost_per_task_usd=Decimal("5.00"),
daily_cost_limit_usd=Decimal("50.00"),
created_at=datetime.now(timezone.utc),
updated_at=datetime.now(timezone.utc),
)
project2 = Project(
name="Project 2",
slug="project-2",
project_type="backend",
repo_url="https://github.com/user/project2",
max_cost_per_task_usd=Decimal("5.00"),
daily_cost_limit_usd=Decimal("50.00"),
created_at=datetime.now(timezone.utc),
updated_at=datetime.now(timezone.utc),
)
test_db.add_all([project1, project2])
await test_db.commit()
await test_db.refresh(project1)
await test_db.refresh(project2)
# Create task runs for each project
task1 = TaskRun(
project_id=project1.id,
work_item_id="100",
prompt="Test task",
status="queued",
created_at=datetime.now(timezone.utc),
updated_at=datetime.now(timezone.utc),
)
task2 = TaskRun(
project_id=project2.id,
work_item_id="200",
prompt="Test task",
status="queued",
created_at=datetime.now(timezone.utc),
updated_at=datetime.now(timezone.utc),
)
test_db.add_all([task1, task2])
await test_db.commit()
# Filter by project1
response = test_client.get(
f"/api/v1/task-runs?project_id={project1.id}",
headers={"X-API-Key": test_api_key.key_hash},
)
assert response.status_code == 200
data = response.json()
assert data["total"] == 1
assert data["items"][0]["work_item_id"] == "100"
class TestQueueTaskRun:
"""Test POST /api/v1/task-runs endpoint."""
async def test_queue_task_run_success(self, test_client, test_api_key, test_db, test_project):
"""Test successful task run queueing."""
payload = {
"project_id": str(test_project.id),
"work_item_id": "42",
"work_item_title": "Add health system to player",
"work_item_url": "https://github.com/user/repo/issues/42",
"prompt": "Implement health system with take_damage and heal methods",
}
response = test_client.post(
"/api/v1/task-runs",
json=payload,
headers={"X-API-Key": test_api_key.key_hash},
)
assert response.status_code == 201
data = response.json()
assert data["work_item_id"] == "42"
assert data["status"] == "queued"
assert data["work_item_title"] == "Add health system to player"
assert "id" in data
async def test_queue_task_run_project_not_found(self, test_client, test_api_key):
"""Test queueing task run for non-existent project."""
payload = {
"project_id": str(uuid4()),
"work_item_id": "42",
"prompt": "Test task",
}
response = test_client.post(
"/api/v1/task-runs",
json=payload,
headers={"X-API-Key": test_api_key.key_hash},
)
assert response.status_code == 404
assert "not found" in response.json()["detail"].lower()
async def test_queue_task_run_automation_disabled(self, test_client, test_api_key, test_db):
"""Test queueing task run when automation is disabled."""
# Create project with automation disabled
project = Project(
name="Disabled Project",
slug="disabled-project",
project_type="backend",
repo_url="https://github.com/user/disabled",
max_cost_per_task_usd=Decimal("5.00"),
daily_cost_limit_usd=Decimal("50.00"),
automation_enabled=False,
created_at=datetime.now(timezone.utc),
updated_at=datetime.now(timezone.utc),
)
test_db.add(project)
await test_db.commit()
await test_db.refresh(project)
payload = {
"project_id": str(project.id),
"work_item_id": "42",
"prompt": "Test task",
}
response = test_client.post(
"/api/v1/task-runs",
json=payload,
headers={"X-API-Key": test_api_key.key_hash},
)
assert response.status_code == 409
assert "automation disabled" in response.json()["detail"].lower()
async def test_queue_task_run_concurrent_limit_exceeded(
self, test_client, test_api_key, test_db
):
"""Test queueing task run when concurrent limit is exceeded."""
# Create project with max_concurrent_tasks=1
project = Project(
name="Limited Project",
slug="limited-project",
project_type="backend",
repo_url="https://github.com/user/limited",
max_cost_per_task_usd=Decimal("5.00"),
daily_cost_limit_usd=Decimal("50.00"),
max_concurrent_tasks=1,
automation_enabled=True,
created_at=datetime.now(timezone.utc),
updated_at=datetime.now(timezone.utc),
)
test_db.add(project)
await test_db.commit()
await test_db.refresh(project)
# Create one running task
task_run = TaskRun(
project_id=project.id,
work_item_id="100",
prompt="Test task",
status="running",
created_at=datetime.now(timezone.utc),
updated_at=datetime.now(timezone.utc),
)
test_db.add(task_run)
await test_db.commit()
# Try to queue another
payload = {
"project_id": str(project.id),
"work_item_id": "42",
"prompt": "Test task",
}
response = test_client.post(
"/api/v1/task-runs",
json=payload,
headers={"X-API-Key": test_api_key.key_hash},
)
assert response.status_code == 409
assert "concurrent" in response.json()["detail"].lower()
class TestGetTaskRun:
"""Test GET /api/v1/task-runs/{task_run_id} endpoint."""
async def test_get_task_run_success(self, test_client, test_api_key, test_db, test_project):
"""Test getting single task run."""
task_run = TaskRun(
project_id=test_project.id,
work_item_id="42",
prompt="Test task",
status="running",
cost_usd=Decimal("0.15"),
created_at=datetime.now(timezone.utc),
updated_at=datetime.now(timezone.utc),
)
test_db.add(task_run)
await test_db.commit()
await test_db.refresh(task_run)
response = test_client.get(
f"/api/v1/task-runs/{task_run.id}",
headers={"X-API-Key": test_api_key.key_hash},
)
assert response.status_code == 200
data = response.json()
assert data["id"] == str(task_run.id)
assert data["work_item_id"] == "42"
assert data["status"] == "running"
assert Decimal(data["cost_usd"]) == Decimal("0.15")
async def test_get_task_run_not_found(self, test_client, test_api_key):
"""Test getting non-existent task run."""
response = test_client.get(
f"/api/v1/task-runs/{uuid4()}",
headers={"X-API-Key": test_api_key.key_hash},
)
assert response.status_code == 404
assert "not found" in response.json()["detail"].lower()
class TestUpdateTaskRun:
"""Test PATCH /api/v1/task-runs/{task_run_id} endpoint."""
async def test_update_task_run_success(self, test_client, test_api_key, test_db, test_project):
"""Test successful task run update."""
task_run = TaskRun(
project_id=test_project.id,
work_item_id="42",
prompt="Test task",
status="queued",
created_at=datetime.now(timezone.utc),
updated_at=datetime.now(timezone.utc),
)
test_db.add(task_run)
await test_db.commit()
await test_db.refresh(task_run)
payload = {
"status": "running",
"cost_usd": "0.25",
}
response = test_client.patch(
f"/api/v1/task-runs/{task_run.id}",
json=payload,
headers={"X-API-Key": test_api_key.key_hash},
)
assert response.status_code == 200
data = response.json()
assert data["status"] == "running"
assert Decimal(data["cost_usd"]) == Decimal("0.25")
async def test_update_task_run_invalid_status_transition(
self, test_client, test_api_key, test_db, test_project
):
"""Test invalid status transition (success -> queued not allowed)."""
task_run = TaskRun(
project_id=test_project.id,
work_item_id="42",
prompt="Test task",
status="success",
created_at=datetime.now(timezone.utc),
updated_at=datetime.now(timezone.utc),
)
test_db.add(task_run)
await test_db.commit()
await test_db.refresh(task_run)
payload = {"status": "queued"}
response = test_client.patch(
f"/api/v1/task-runs/{task_run.id}",
json=payload,
headers={"X-API-Key": test_api_key.key_hash},
)
assert response.status_code == 409
assert "transition" in response.json()["detail"].lower()
async def test_update_task_run_not_found(self, test_client, test_api_key):
"""Test updating non-existent task run."""
payload = {"status": "running"}
response = test_client.patch(
f"/api/v1/task-runs/{uuid4()}",
json=payload,
headers={"X-API-Key": test_api_key.key_hash},
)
assert response.status_code == 404
class TestCancelTaskRun:
"""Test POST /api/v1/task-runs/{task_run_id}/cancel endpoint."""
async def test_cancel_task_run_success(self, test_client, test_api_key, test_db, test_project):
"""Test successful task run cancellation."""
task_run = TaskRun(
project_id=test_project.id,
work_item_id="42",
prompt="Test task",
status="running",
created_at=datetime.now(timezone.utc),
updated_at=datetime.now(timezone.utc),
)
test_db.add(task_run)
await test_db.commit()
await test_db.refresh(task_run)
response = test_client.post(
f"/api/v1/task-runs/{task_run.id}/cancel",
headers={"X-API-Key": test_api_key.key_hash},
)
assert response.status_code == 200
data = response.json()
assert data["status"] == "cancelled"
async def test_cancel_task_run_already_completed(
self, test_client, test_api_key, test_db, test_project
):
"""Test cancelling already completed task run."""
task_run = TaskRun(
project_id=test_project.id,
work_item_id="42",
prompt="Test task",
status="success",
created_at=datetime.now(timezone.utc),
updated_at=datetime.now(timezone.utc),
)
test_db.add(task_run)
await test_db.commit()
await test_db.refresh(task_run)
response = test_client.post(
f"/api/v1/task-runs/{task_run.id}/cancel",
headers={"X-API-Key": test_api_key.key_hash},
)
assert response.status_code == 409
assert "cannot cancel" in response.json()["detail"].lower()
async def test_cancel_task_run_not_found(self, test_client, test_api_key):
"""Test cancelling non-existent task run."""
response = test_client.post(
f"/api/v1/task-runs/{uuid4()}/cancel",
headers={"X-API-Key": test_api_key.key_hash},
)
assert response.status_code == 404
class TestRetryTaskRun:
"""Test POST /api/v1/task-runs/{task_run_id}/retry endpoint."""
async def test_retry_task_run_success(self, test_client, test_api_key, test_db, test_project):
"""Test successful task run retry."""
task_run = TaskRun(
project_id=test_project.id,
work_item_id="42",
prompt="Test task",
status="failed",
error_message="Test error",
created_at=datetime.now(timezone.utc),
updated_at=datetime.now(timezone.utc),
)
test_db.add(task_run)
await test_db.commit()
await test_db.refresh(task_run)
response = test_client.post(
f"/api/v1/task-runs/{task_run.id}/retry",
headers={"X-API-Key": test_api_key.key_hash},
)
assert response.status_code == 200
data = response.json()
assert data["status"] == "queued"
assert data["retry_count"] == 1
assert data["error_message"] is None
async def test_retry_task_run_not_failed(
self, test_client, test_api_key, test_db, test_project
):
"""Test retrying non-failed task run."""
task_run = TaskRun(
project_id=test_project.id,
work_item_id="42",
prompt="Test task",
status="success",
created_at=datetime.now(timezone.utc),
updated_at=datetime.now(timezone.utc),
)
test_db.add(task_run)
await test_db.commit()
await test_db.refresh(task_run)
response = test_client.post(
f"/api/v1/task-runs/{task_run.id}/retry",
headers={"X-API-Key": test_api_key.key_hash},
)
assert response.status_code == 409
assert "cannot retry" in response.json()["detail"].lower()
async def test_retry_task_run_not_found(self, test_client, test_api_key):
"""Test retrying non-existent task run."""
response = test_client.post(
f"/api/v1/task-runs/{uuid4()}/retry",
headers={"X-API-Key": test_api_key.key_hash},
)
assert response.status_code == 404
class TestDeleteTaskRun:
"""Test DELETE /api/v1/task-runs/{task_run_id} endpoint."""
async def test_delete_task_run_success(self, test_client, test_api_key, test_db, test_project):
"""Test successful task run deletion."""
task_run = TaskRun(
project_id=test_project.id,
work_item_id="42",
prompt="Test task",
status="success",
created_at=datetime.now(timezone.utc),
updated_at=datetime.now(timezone.utc),
)
test_db.add(task_run)
await test_db.commit()
await test_db.refresh(task_run)
task_run_id = task_run.id
response = test_client.delete(
f"/api/v1/task-runs/{task_run_id}",
headers={"X-API-Key": test_api_key.key_hash},
)
assert response.status_code == 204
# Verify deleted
get_response = test_client.get(
f"/api/v1/task-runs/{task_run_id}",
headers={"X-API-Key": test_api_key.key_hash},
)
assert get_response.status_code == 404
async def test_delete_task_run_not_found(self, test_client, test_api_key):
"""Test deleting non-existent task run."""
response = test_client.delete(
f"/api/v1/task-runs/{uuid4()}",
headers={"X-API-Key": test_api_key.key_hash},
)
assert response.status_code == 404
| """Integration tests for TaskRuns API endpoints.
Tests all 7 TaskRun CRUD endpoints with state machine validation.
"""
from datetime import datetime, timezone
from decimal import Decimal
from uuid import uuid4
import pytest
from fastapi.testclient import TestClient
from lazy_bird.api.main import app
from lazy_bird.models.claude_account import ClaudeAccount
from lazy_bird.models.framework_preset import FrameworkPreset
from lazy_bird.models.project import Project
from lazy_bird.models.task_run import TaskRun
class TestListTaskRuns:
"""Test GET /api/v1/task-runs endpoint."""
async def test_list_task_runs_empty(self, test_client, test_api_key):
"""Test listing task runs when none exist."""
response = test_client.get(
"/api/v1/task-runs",
headers={"X-API-Key": test_api_key.key_hash},
)
assert response.status_code == 200
data = response.json()
assert data["total"] == 0
assert data["items"] == []
assert data["page"] == 1
assert data["pages"] == 0
async def test_list_task_runs_pagination(
self, test_client, test_api_key, test_db, test_project
):
"""Test task runs pagination."""
# Create 5 task runs
for i in range(5):
task_run = TaskRun(
project | [
"# yusufkaraaslan/lazy-bird:lazy_bird/models/claude_account.py\nClaudeAccount",
"# yusufkaraaslan/lazy-bird:lazy_bird/models/framework_preset.py\nFrameworkPreset",
"# yusufkaraaslan/lazy-bird:lazy_bird/models/project.py\nProject",
"# yusufkaraaslan/lazy-bird:lazy_bird/models/task_run.py\nTaskRun"
] | yusufkaraaslan/lazy-bird | tests/integration/test_task_runs_api.py |
"""Integration tests for Projects API endpoints.
Tests all CRUD operations, validation, pagination, filtering, and search.
"""
import pytest
from decimal import Decimal
from uuid import UUID
pytestmark = pytest.mark.asyncio
class TestListProjects:
"""Test GET /api/v1/projects - List projects with pagination."""
async def test_list_projects_empty(self, test_client, test_api_key):
"""Test listing projects when database is empty."""
response = test_client.get(
"/api/v1/projects",
headers={"X-API-Key": test_api_key.key_hash},
)
assert response.status_code == 200
data = response.json()
assert data["total"] == 0
assert data["items"] == []
assert data["page"] == 1
assert data["pages"] == 0
async def test_list_projects_with_data(self, test_client, test_api_key, test_project):
"""Test listing projects with existing data."""
response = test_client.get(
"/api/v1/projects",
headers={"X-API-Key": test_api_key.key_hash},
)
assert response.status_code == 200
data = response.json()
assert data["total"] == 1
assert len(data["items"]) == 1
assert data["items"][0]["slug"] == "test-project"
assert data["items"][0]["name"] == "Test Project"
async def test_list_projects_pagination(self, test_client, test_api_key, test_db):
"""Test pagination with multiple projects."""
from lazy_bird.models.project import Project
from datetime import datetime, timezone
from decimal import Decimal
# Create 5 projects
for i in range(5):
project = Project(
name=f"Project {i}",
slug=f"project-{i}",
repo_url=f"https://github.com/user/project-{i}",
default_branch="main",
project_type="python",
max_cost_per_task_usd=Decimal("5.00"),
daily_cost_limit_usd=Decimal("50.00"),
created_at=datetime.now(timezone.utc),
updated_at=datetime.now(timezone.utc),
)
test_db.add(project)
await test_db.commit()
# Test page 1 with page_size=2
response = test_client.get(
"/api/v1/projects?page=1&page_size=2",
headers={"X-API-Key": test_api_key.key_hash},
)
assert response.status_code == 200
data = response.json()
assert data["total"] == 5
assert len(data["items"]) == 2
assert data["page"] == 1
assert data["page_size"] == 2
assert data["pages"] == 3
async def test_list_projects_filter_by_type(self, test_client, test_api_key, test_db):
"""Test filtering projects by project_type."""
from lazy_bird.models.project import Project
from datetime import datetime, timezone
from decimal import Decimal
# Create projects with different types
for project_type in ["python", "godot", "rust"]:
project = Project(
name=f"{project_type.title()} Project",
slug=f"{project_type}-project",
repo_url=f"https://github.com/user/{project_type}",
default_branch="main",
project_type=project_type,
max_cost_per_task_usd=Decimal("5.00"),
daily_cost_limit_usd=Decimal("50.00"),
created_at=datetime.now(timezone.utc),
updated_at=datetime.now(timezone.utc),
)
test_db.add(project)
await test_db.commit()
# Filter by Python projects
response = test_client.get(
"/api/v1/projects?project_type=python",
headers={"X-API-Key": test_api_key.key_hash},
)
assert response.status_code == 200
data = response.json()
assert data["total"] == 1
assert data["items"][0]["project_type"] == "python"
async def test_list_projects_search(self, test_client, test_api_key, test_db):
"""Test searching projects by name, slug, repo_url."""
from lazy_bird.models.project import Project
from datetime import datetime, timezone
from decimal import Decimal
# Create projects
projects_data = [
("My Game", "my-game", "https://github.com/user/game"),
("Backend API", "backend-api", "https://github.com/user/api"),
("Frontend UI", "frontend-ui", "https://github.com/user/ui"),
]
for name, slug, repo_url in projects_data:
project = Project(
name=name,
slug=slug,
repo_url=repo_url,
default_branch="main",
project_type="python",
max_cost_per_task_usd=Decimal("5.00"),
daily_cost_limit_usd=Decimal("50.00"),
created_at=datetime.now(timezone.utc),
updated_at=datetime.now(timezone.utc),
)
test_db.add(project)
await test_db.commit()
# Search for "game"
response = test_client.get(
"/api/v1/projects?search=game",
headers={"X-API-Key": test_api_key.key_hash},
)
assert response.status_code == 200
data = response.json()
assert data["total"] == 1
assert data["items"][0]["slug"] == "my-game"
async def test_list_projects_exclude_deleted(self, test_client, test_api_key, test_db):
"""Test that soft-deleted projects are excluded by default."""
from lazy_bird.models.project import Project
from datetime import datetime, timezone
from decimal import Decimal
# Create active project
active_project = Project(
name="Active Project",
slug="active-project",
repo_url="https://github.com/user/active",
default_branch="main",
project_type="python",
max_cost_per_task_usd=Decimal("5.00"),
daily_cost_limit_usd=Decimal("50.00"),
created_at=datetime.now(timezone.utc),
updated_at=datetime.now(timezone.utc),
)
test_db.add(active_project)
# Create deleted project
deleted_project = Project(
name="Deleted Project",
slug="deleted-project",
repo_url="https://github.com/user/deleted",
default_branch="main",
project_type="python",
max_cost_per_task_usd=Decimal("5.00"),
daily_cost_limit_usd=Decimal("50.00"),
created_at=datetime.now(timezone.utc),
updated_at=datetime.now(timezone.utc),
deleted_at=datetime.now(timezone.utc),
)
test_db.add(deleted_project)
await test_db.commit()
# List without include_deleted (should only show active)
response = test_client.get(
"/api/v1/projects",
headers={"X-API-Key": test_api_key.key_hash},
)
assert response.status_code == 200
data = response.json()
assert data["total"] == 1
assert data["items"][0]["slug"] == "active-project"
class TestCreateProject:
"""Test POST /api/v1/projects - Create new project."""
async def test_create_project_minimal(self, test_client, test_api_key):
"""Test creating project with minimal required fields."""
project_data = {
"name": "New Project",
"slug": "new-project",
"repo_url": "https://github.com/user/new-project",
"project_type": "python",
}
response = test_client.post(
"/api/v1/projects",
json=project_data,
headers={"X-API-Key": test_api_key.key_hash},
)
assert response.status_code == 201
data = response.json()
assert data["name"] == "New Project"
assert data["slug"] == "new-project"
assert data["project_type"] == "python"
assert "id" in data
assert data["automation_enabled"] is False # Default value
async def test_create_project_full(self, test_client, test_api_key):
"""Test creating project with all optional fields."""
project_data = {
"name": "Full Project",
"slug": "full-project",
"repo_url": "https://github.com/user/full",
"default_branch": "develop",
"project_type": "python",
"test_command": "pytest tests/",
"build_command": "python -m build",
"lint_command": "flake8 .",
"format_command": "black .",
"automation_enabled": True,
"max_concurrent_tasks": 5,
"task_timeout_seconds": 3600,
"max_cost_per_task_usd": "10.00",
"daily_cost_limit_usd": "100.00",
}
response = test_client.post(
"/api/v1/projects",
json=project_data,
headers={"X-API-Key": test_api_key.key_hash},
)
assert response.status_code == 201
data = response.json()
assert data["automation_enabled"] is True
assert data["max_concurrent_tasks"] == 5
assert data["test_command"] == "pytest tests/"
async def test_create_project_duplicate_slug(self, test_client, test_api_key, test_project):
"""Test creating project with duplicate slug returns 409 Conflict."""
project_data = {
"name": "Duplicate",
"slug": "test-project", # Same as test_project fixture
"repo_url": "https://github.com/user/duplicate",
"project_type": "python",
}
response = test_client.post(
"/api/v1/projects",
json=project_data,
headers={"X-API-Key": test_api_key.key_hash},
)
assert response.status_code == 409
data = response.json()
assert "slug" in data["detail"].lower() or "already exists" in data["detail"].lower()
async def test_create_project_invalid_slug(self, test_client, test_api_key):
"""Test creating project with invalid slug format returns 422."""
project_data = {
"name": "Invalid",
"slug": "Invalid_Slug!", # Invalid: uppercase and special chars
"repo_url": "https://github.com/user/invalid",
"project_type": "python",
}
response = test_client.post(
"/api/v1/projects",
json=project_data,
headers={"X-API-Key": test_api_key.key_hash},
)
assert response.status_code == 422
async def test_create_project_missing_required_fields(self, test_client, test_api_key):
"""Test creating project without required fields returns 422."""
project_data = {
"name": "Incomplete",
# Missing: slug, repo_url, project_type
}
response = test_client.post(
"/api/v1/projects",
json=project_data,
headers={"X-API-Key": test_api_key.key_hash},
)
assert response.status_code == 422
class TestGetProject:
"""Test GET /api/v1/projects/{project_id} - Get single project."""
async def test_get_project_by_id(self, test_client, test_api_key, test_project):
"""Test getting project by ID."""
response = test_client.get(
f"/api/v1/projects/{test_project.id}",
headers={"X-API-Key": test_api_key.key_hash},
)
assert response.status_code == 200
data = response.json()
assert data["id"] == str(test_project.id)
assert data["slug"] == "test-project"
assert data["name"] == "Test Project"
async def test_get_project_not_found(self, test_client, test_api_key):
"""Test getting non-existent project returns 404."""
fake_uuid = "12345678-1234-1234-1234-123456789012"
response = test_client.get(
f"/api/v1/projects/{fake_uuid}",
headers={"X-API-Key": test_api_key.key_hash},
)
assert response.status_code == 404
data = response.json()
assert "not found" in data["detail"].lower()
async def test_get_deleted_project_returns_404(
self, test_client, test_api_key, test_project, test_db
):
"""Test getting soft-deleted project returns 404."""
from datetime import datetime, timezone
# Soft delete the project
test_db.add(test_project)
test_project.deleted_at = datetime.now(timezone.utc)
await test_db.commit()
response = test_client.get(
f"/api/v1/projects/{test_project.id}",
headers={"X-API-Key": test_api_key.key_hash},
)
assert response.status_code == 404
class TestUpdateProject:
"""Test PATCH /api/v1/projects/{project_id} - Update project."""
async def test_update_project_single_field(self, test_client, test_api_key, test_project):
"""Test updating single field."""
update_data = {"name": "Updated Name"}
response = test_client.patch(
f"/api/v1/projects/{test_project.id}",
json=update_data,
headers={"X-API-Key": test_api_key.key_hash},
)
assert response.status_code == 200
data = response.json()
assert data["name"] == "Updated Name"
assert data["slug"] == "test-project" # Unchanged
async def test_update_project_multiple_fields(self, test_client, test_api_key, test_project):
"""Test updating multiple fields."""
update_data = {
"automation_enabled": False,
"max_concurrent_tasks": 10,
"daily_cost_limit_usd": "200.00",
}
response = test_client.patch(
f"/api/v1/projects/{test_project.id}",
json=update_data,
headers={"X-API-Key": test_api_key.key_hash},
)
assert response.status_code == 200
data = response.json()
assert data["automation_enabled"] is False
assert data["max_concurrent_tasks"] == 10
async def test_update_project_duplicate_slug(
self, test_client, test_api_key, test_project, test_db
):
"""Test updating slug to existing slug returns 409."""
from lazy_bird.models.project import Project
from datetime import datetime, timezone
from decimal import Decimal
# Create another project
other_project = Project(
name="Other Project",
slug="other-project",
repo_url="https://github.com/user/other",
default_branch="main",
project_type="python",
max_cost_per_task_usd=Decimal("5.00"),
daily_cost_limit_usd=Decimal("50.00"),
created_at=datetime.now(timezone.utc),
updated_at=datetime.now(timezone.utc),
)
test_db.add(other_project)
await test_db.commit()
# Try to update test_project slug to match other_project
update_data = {"slug": "other-project"}
response = test_client.patch(
f"/api/v1/projects/{test_project.id}",
json=update_data,
headers={"X-API-Key": test_api_key.key_hash},
)
assert response.status_code == 409
async def test_update_project_not_found(self, test_client, test_api_key):
"""Test updating non-existent project returns 404."""
fake_uuid = "12345678-1234-1234-1234-123456789012"
update_data = {"name": "Updated"}
response = test_client.patch(
f"/api/v1/projects/{fake_uuid}",
json=update_data,
headers={"X-API-Key": test_api_key.key_hash},
)
assert response.status_code == 404
async def test_update_project_empty_body(self, test_client, test_api_key, test_project):
"""Test updating with empty body returns existing project."""
update_data = {}
response = test_client.patch(
f"/api/v1/projects/{test_project.id}",
json=update_data,
headers={"X-API-Key": test_api_key.key_hash},
)
assert response.status_code == 200
data = response.json()
assert data["name"] == "Test Project" # Unchanged
class TestDeleteProject:
"""Test DELETE /api/v1/projects/{project_id} - Soft delete project."""
async def test_delete_project(self, test_client, test_api_key, test_project):
"""Test soft deleting project."""
response = test_client.delete(
f"/api/v1/projects/{test_project.id}",
headers={"X-API-Key": test_api_key.key_hash},
)
assert response.status_code == 204
assert response.content == b"" # No response body
async def test_delete_project_verify_soft_delete(self, test_client, test_api_key, test_project):
"""Test that deleted project is soft deleted, not hard deleted."""
# Delete project
response = test_client.delete(
f"/api/v1/projects/{test_project.id}",
headers={"X-API-Key": test_api_key.key_hash},
)
assert response.status_code == 204
# Try to get project (should return 404)
response = test_client.get(
f"/api/v1/projects/{test_project.id}",
headers={"X-API-Key": test_api_key.key_hash},
)
assert response.status_code == 404
# Verify project still exists in database but with deleted_at set
# (would need direct DB access to verify)
async def test_delete_project_not_found(self, test_client, test_api_key):
"""Test deleting non-existent project returns 404."""
fake_uuid = "12345678-1234-1234-1234-123456789012"
response = test_client.delete(
f"/api/v1/projects/{fake_uuid}",
headers={"X-API-Key": test_api_key.key_hash},
)
assert response.status_code == 404
async def test_delete_already_deleted_project(
self, test_client, test_api_key, test_project, test_db
):
"""Test deleting already-deleted project returns 404."""
from datetime import datetime, timezone
# Soft delete the project manually
test_db.add(test_project)
test_project.deleted_at = datetime.now(timezone.utc)
await test_db.commit()
# Try to delete again
response = test_client.delete(
f"/api/v1/projects/{test_project.id}",
headers={"X-API-Key": test_api_key.key_hash},
)
assert response.status_code == 404
class TestAuthentication:
"""Test API key authentication."""
async def test_missing_api_key(self, test_client):
"""Test accessing endpoint without API key returns 401/403."""
response = test_client.get("/api/v1/projects")
assert response.status_code in [401, 403]
async def test_invalid_api_key(self, test_client):
"""Test accessing endpoint with invalid API key returns 401/403."""
response = test_client.get(
"/api/v1/projects",
headers={"X-API-Key": "invalid-key"},
)
assert response.status_code in [401, 403]
| """Integration tests for Projects API endpoints.
Tests all CRUD operations, validation, pagination, filtering, and search.
"""
import pytest
from decimal import Decimal
from uuid import UUID
pytestmark = pytest.mark.asyncio
class TestListProjects:
"""Test GET /api/v1/projects - List projects with pagination."""
async def test_list_projects_empty(self, test_client, test_api_key):
"""Test listing projects when database is empty."""
response = test_client.get(
"/api/v1/projects",
headers={"X-API-Key": test_api_key.key_hash},
)
assert response.status_code == 200
data = response.json()
assert data["total"] == 0
assert data["items"] == []
assert data["page"] == 1
assert data["pages"] == 0
async def test_list_projects_with_data(self, test_client, test_api_key, test_project):
"""Test listing projects with existing data."""
response = test_client.get(
"/api/v1/projects",
headers={"X-API-Key": test_api_key.key_hash},
)
assert response.status_code == 200
data = response.json()
assert data["total"] == 1
assert len(data["items"]) == 1
assert data["items"][0]["slug"] == | [] | yusufkaraaslan/lazy-bird | tests/integration/test_projects_api.py |
"""Integration tests for Health API endpoints.
Tests all 4 health check endpoints (comprehensive, liveness, readiness, startup).
"""
from unittest.mock import AsyncMock, MagicMock, patch
import pytest
from fastapi.testclient import TestClient
from lazy_bird.api.main import app
class TestHealthCheck:
"""Test GET /api/v1/health endpoint (comprehensive check)."""
def test_health_check_success(self, test_client):
"""Test comprehensive health check when all services are healthy."""
with patch("lazy_bird.api.routers.health.check_database") as mock_db, patch(
"lazy_bird.api.routers.health.check_redis_status"
) as mock_redis, patch(
"lazy_bird.api.routers.health.check_celery_status"
) as mock_celery, patch(
"lazy_bird.api.routers.health.get_system_metrics"
) as mock_metrics:
# Mock all services as healthy
mock_db.return_value = {"status": "healthy", "mode": "async"}
mock_redis.return_value = {"status": "healthy"}
mock_celery.return_value = {"status": "not_configured"}
mock_metrics.return_value = {
"cpu": {"usage_percent": 25.5, "cores": 8},
"memory": {"total_mb": 16384, "used_mb": 8192, "percent": 50.0},
"disk": {"total_gb": 500, "used_gb": 250, "percent": 50.0},
}
response = test_client.get("/api/v1/health")
assert response.status_code == 200
data = response.json()
assert data["status"] == "healthy"
assert "timestamp" in data
assert "version" in data
assert "environment" in data
assert "services" in data
assert data["services"]["database"]["status"] == "healthy"
assert data["services"]["redis"]["status"] == "healthy"
assert "system" in data
assert "cpu" in data["system"]
assert "memory" in data["system"]
assert "disk" in data["system"]
def test_health_check_database_unhealthy(self, test_client):
"""Test health check when database is unhealthy."""
with patch("lazy_bird.api.routers.health.check_database") as mock_db, patch(
"lazy_bird.api.routers.health.check_redis_status"
) as mock_redis, patch(
"lazy_bird.api.routers.health.check_celery_status"
) as mock_celery, patch(
"lazy_bird.api.routers.health.get_system_metrics"
) as mock_metrics:
mock_db.return_value = {"status": "unhealthy", "error": "Connection refused"}
mock_redis.return_value = {"status": "healthy"}
mock_celery.return_value = {"status": "not_configured"}
mock_metrics.return_value = {"cpu": {}, "memory": {}, "disk": {}}
response = test_client.get("/api/v1/health")
assert response.status_code == 503 # Service Unavailable
data = response.json()
assert data["status"] == "unhealthy"
assert data["services"]["database"]["status"] == "unhealthy"
def test_health_check_redis_unhealthy(self, test_client):
"""Test health check when Redis is unhealthy."""
with patch("lazy_bird.api.routers.health.check_database") as mock_db, patch(
"lazy_bird.api.routers.health.check_redis_status"
) as mock_redis, patch(
"lazy_bird.api.routers.health.check_celery_status"
) as mock_celery, patch(
"lazy_bird.api.routers.health.get_system_metrics"
) as mock_metrics:
mock_db.return_value = {"status": "healthy"}
mock_redis.return_value = {"status": "unhealthy", "error": "Connection timeout"}
mock_celery.return_value = {"status": "not_configured"}
mock_metrics.return_value = {"cpu": {}, "memory": {}, "disk": {}}
response = test_client.get("/api/v1/health")
assert response.status_code == 503
data = response.json()
assert data["status"] == "unhealthy"
assert data["services"]["redis"]["status"] == "unhealthy"
def test_health_check_celery_degraded(self, test_client):
"""Test health check when Celery is degraded (still returns 200)."""
with patch("lazy_bird.api.routers.health.check_database") as mock_db, patch(
"lazy_bird.api.routers.health.check_redis_status"
) as mock_redis, patch(
"lazy_bird.api.routers.health.check_celery_status"
) as mock_celery, patch(
"lazy_bird.api.routers.health.get_system_metrics"
) as mock_metrics:
mock_db.return_value = {"status": "healthy"}
mock_redis.return_value = {"status": "healthy"}
mock_celery.return_value = {
"status": "degraded",
"workers": 0,
"message": "No active workers",
}
mock_metrics.return_value = {"cpu": {}, "memory": {}, "disk": {}}
response = test_client.get("/api/v1/health")
assert response.status_code == 200 # Degraded Celery is OK
data = response.json()
assert data["status"] == "healthy"
assert data["services"]["celery"]["status"] == "degraded"
def test_health_check_with_trailing_slash(self, test_client):
"""Test health check with trailing slash."""
with patch("lazy_bird.api.routers.health.check_database") as mock_db, patch(
"lazy_bird.api.routers.health.check_redis_status"
) as mock_redis, patch(
"lazy_bird.api.routers.health.check_celery_status"
) as mock_celery, patch(
"lazy_bird.api.routers.health.get_system_metrics"
) as mock_metrics:
mock_db.return_value = {"status": "healthy"}
mock_redis.return_value = {"status": "healthy"}
mock_celery.return_value = {"status": "not_configured"}
mock_metrics.return_value = {"cpu": {}, "memory": {}, "disk": {}}
response = test_client.get("/api/v1/health/")
assert response.status_code == 200
data = response.json()
assert data["status"] == "healthy"
class TestLivenessProbe:
"""Test GET /api/v1/health/live endpoint."""
def test_liveness_probe_success(self, test_client):
"""Test liveness probe always returns success."""
response = test_client.get("/api/v1/health/live")
assert response.status_code == 200
data = response.json()
assert data["status"] == "alive"
assert "timestamp" in data
def test_liveness_probe_no_authentication(self, test_client):
"""Test liveness probe doesn't require authentication."""
# No API key header provided
response = test_client.get("/api/v1/health/live")
assert response.status_code == 200 # Should still work
class TestReadinessProbe:
"""Test GET /api/v1/health/ready endpoint."""
def test_readiness_probe_ready(self, test_client):
"""Test readiness probe when services are ready."""
with patch("lazy_bird.api.routers.health.check_database") as mock_db, patch(
"lazy_bird.api.routers.health.check_redis_status"
) as mock_redis:
mock_db.return_value = {"status": "healthy"}
mock_redis.return_value = {"status": "healthy"}
response = test_client.get("/api/v1/health/ready")
assert response.status_code == 200
data = response.json()
assert data["status"] == "ready"
assert data["services"]["database"] == "healthy"
assert data["services"]["redis"] == "healthy"
def test_readiness_probe_not_ready_database(self, test_client):
"""Test readiness probe when database is not ready."""
with patch("lazy_bird.api.routers.health.check_database") as mock_db, patch(
"lazy_bird.api.routers.health.check_redis_status"
) as mock_redis:
mock_db.return_value = {"status": "unhealthy"}
mock_redis.return_value = {"status": "healthy"}
response = test_client.get("/api/v1/health/ready")
assert response.status_code == 503
data = response.json()
assert data["status"] == "not_ready"
def test_readiness_probe_not_ready_redis(self, test_client):
"""Test readiness probe when Redis is not ready."""
with patch("lazy_bird.api.routers.health.check_database") as mock_db, patch(
"lazy_bird.api.routers.health.check_redis_status"
) as mock_redis:
mock_db.return_value = {"status": "healthy"}
mock_redis.return_value = {"status": "unhealthy"}
response = test_client.get("/api/v1/health/ready")
assert response.status_code == 503
data = response.json()
assert data["status"] == "not_ready"
def test_readiness_probe_no_celery_check(self, test_client):
"""Test that readiness probe doesn't check Celery (only critical services)."""
with patch("lazy_bird.api.routers.health.check_database") as mock_db, patch(
"lazy_bird.api.routers.health.check_redis_status"
) as mock_redis, patch("lazy_bird.api.routers.health.check_celery_status") as mock_celery:
mock_db.return_value = {"status": "healthy"}
mock_redis.return_value = {"status": "healthy"}
# Celery check should not be called
mock_celery.return_value = {"status": "unhealthy"}
response = test_client.get("/api/v1/health/ready")
assert response.status_code == 200 # Still ready even if Celery is down
# Celery should not be called
mock_celery.assert_not_called()
class TestStartupProbe:
"""Test GET /api/v1/health/startup endpoint."""
def test_startup_probe_started(self, test_client):
"""Test startup probe when application has started."""
with patch("lazy_bird.api.routers.health.check_database") as mock_db:
mock_db.return_value = {"status": "healthy"}
response = test_client.get("/api/v1/health/startup")
assert response.status_code == 200
data = response.json()
assert data["status"] == "started"
assert data["database"] == "healthy"
def test_startup_probe_starting(self, test_client):
"""Test startup probe when application is still starting."""
with patch("lazy_bird.api.routers.health.check_database") as mock_db:
mock_db.return_value = {"status": "unhealthy", "error": "Starting up"}
response = test_client.get("/api/v1/health/startup")
assert response.status_code == 503
data = response.json()
assert data["status"] == "starting"
def test_startup_probe_only_checks_database(self, test_client):
"""Test that startup probe only checks database (most lenient)."""
with patch("lazy_bird.api.routers.health.check_database") as mock_db, patch(
"lazy_bird.api.routers.health.check_redis_status"
) as mock_redis:
mock_db.return_value = {"status": "healthy"}
# Redis check should not be called
mock_redis.return_value = {"status": "unhealthy"}
response = test_client.get("/api/v1/health/startup")
assert response.status_code == 200 # Started even if Redis is down
# Redis should not be called
mock_redis.assert_not_called()
class TestHealthProbeHierarchy:
"""Test the hierarchy of health probes."""
def test_startup_most_lenient(self, test_client):
"""Test that startup probe is the most lenient (only needs DB)."""
with patch("lazy_bird.api.routers.health.check_database") as mock_db:
mock_db.return_value = {"status": "healthy"}
response = test_client.get("/api/v1/health/startup")
assert response.status_code == 200
data = response.json()
assert data["status"] == "started"
def test_readiness_checks_db_and_redis(self, test_client):
"""Test that readiness probe needs both DB and Redis."""
with patch("lazy_bird.api.routers.health.check_database") as mock_db, patch(
"lazy_bird.api.routers.health.check_redis_status"
) as mock_redis:
mock_db.return_value = {"status": "healthy"}
mock_redis.return_value = {"status": "healthy"}
response = test_client.get("/api/v1/health/ready")
assert response.status_code == 200
# Both checks should be called
mock_db.assert_called_once()
mock_redis.assert_called_once()
def test_comprehensive_checks_all_services(self, test_client):
"""Test that comprehensive health check verifies all services."""
with patch("lazy_bird.api.routers.health.check_database") as mock_db, patch(
"lazy_bird.api.routers.health.check_redis_status"
) as mock_redis, patch(
"lazy_bird.api.routers.health.check_celery_status"
) as mock_celery, patch(
"lazy_bird.api.routers.health.get_system_metrics"
) as mock_metrics:
mock_db.return_value = {"status": "healthy"}
mock_redis.return_value = {"status": "healthy"}
mock_celery.return_value = {"status": "not_configured"}
mock_metrics.return_value = {"cpu": {}, "memory": {}, "disk": {}}
response = test_client.get("/api/v1/health")
assert response.status_code == 200
# All checks should be called
mock_db.assert_called_once()
mock_redis.assert_called_once()
mock_celery.assert_called_once()
mock_metrics.assert_called_once()
class TestKubernetesIntegration:
"""Test Kubernetes probe integration patterns."""
def test_startup_then_liveness_then_readiness(self, test_client):
"""Test typical Kubernetes probe sequence."""
with patch("lazy_bird.api.routers.health.check_database") as mock_db, patch(
"lazy_bird.api.routers.health.check_redis_status"
) as mock_redis:
# Step 1: Startup probe (database initializing)
mock_db.return_value = {"status": "unhealthy"}
startup_response = test_client.get("/api/v1/health/startup")
assert startup_response.status_code == 503
# Step 2: Startup probe (database ready)
mock_db.return_value = {"status": "healthy"}
startup_response = test_client.get("/api/v1/health/startup")
assert startup_response.status_code == 200
# Step 3: Liveness probe (always alive)
liveness_response = test_client.get("/api/v1/health/live")
assert liveness_response.status_code == 200
# Step 4: Readiness probe (Redis not ready yet)
mock_redis.return_value = {"status": "unhealthy"}
readiness_response = test_client.get("/api/v1/health/ready")
assert readiness_response.status_code == 503
# Step 5: Readiness probe (all services ready)
mock_redis.return_value = {"status": "healthy"}
readiness_response = test_client.get("/api/v1/health/ready")
assert readiness_response.status_code == 200
def test_liveness_failure_indicates_restart_needed(self, test_client):
"""Test that liveness probe failure (if it ever fails) indicates restart."""
# Liveness probe should always return 200
# This test documents the behavior
response = test_client.get("/api/v1/health/live")
assert response.status_code == 200
assert response.json()["status"] == "alive"
| """Integration tests for Health API endpoints.
Tests all 4 health check endpoints (comprehensive, liveness, readiness, startup).
"""
from unittest.mock import AsyncMock, MagicMock, patch
import pytest
from fastapi.testclient import TestClient
from lazy_bird.api.main import app
class TestHealthCheck:
"""Test GET /api/v1/health endpoint (comprehensive check)."""
def test_health_check_success(self, test_client):
"""Test comprehensive health check when all services are healthy."""
with patch("lazy_bird.api.routers.health.check_database") as mock_db, patch(
"lazy_bird.api.routers.health.check_redis_status"
) as mock_redis, patch(
"lazy_bird.api.routers.health.check_celery_status"
) as mock_celery, patch(
"lazy_bird.api.routers.health.get_system_metrics"
) as mock_metrics:
# Mock all services as healthy
mock_db.return_value = {"status": "healthy", "mode": "async"}
mock_redis.return_value = {"status": "healthy"}
mock_celery.return_value = {"status": "not_configured"}
mock_metrics.return_value = {
"cpu": {"usage_percent": 25.5, "cores": 8},
"memory | [] | yusufkaraaslan/lazy-bird | tests/integration/test_health_api.py |
"""Integration tests for FrameworkPresets API endpoints.
Tests all 5 FrameworkPreset CRUD endpoints with built-in protection.
"""
from datetime import datetime, timezone
from uuid import uuid4
import pytest
from fastapi.testclient import TestClient
from lazy_bird.api.main import app
from lazy_bird.models.framework_preset import FrameworkPreset
class TestListFrameworkPresets:
"""Test GET /api/v1/framework-presets endpoint."""
async def test_list_framework_presets_empty(self, test_client, test_api_key):
"""Test listing framework presets when none exist."""
response = test_client.get(
"/api/v1/framework-presets",
headers={"X-API-Key": test_api_key.key_hash},
)
assert response.status_code == 200
data = response.json()
assert data["total"] == 0
assert data["items"] == []
async def test_list_framework_presets_pagination(self, test_client, test_api_key, test_db):
"""Test framework presets pagination."""
# Create 5 presets
for i in range(5):
preset = FrameworkPreset(
name=f"preset-{i}",
display_name=f"Preset {i}",
framework_type="backend",
test_command=f"pytest test-{i}",
is_builtin=False,
created_at=datetime.now(timezone.utc),
updated_at=datetime.now(timezone.utc),
)
test_db.add(preset)
await test_db.commit()
# Get page 1 with page_size=2
response = test_client.get(
"/api/v1/framework-presets?page=1&page_size=2",
headers={"X-API-Key": test_api_key.key_hash},
)
assert response.status_code == 200
data = response.json()
assert data["total"] == 5
assert len(data["items"]) == 2
assert data["pages"] == 3
async def test_list_framework_presets_filter_by_type(self, test_client, test_api_key, test_db):
"""Test filtering framework presets by framework_type."""
# Create presets with different types
backend = FrameworkPreset(
name="django",
display_name="Django",
framework_type="backend",
test_command="pytest",
is_builtin=True,
created_at=datetime.now(timezone.utc),
updated_at=datetime.now(timezone.utc),
)
frontend = FrameworkPreset(
name="react",
display_name="React",
framework_type="frontend",
test_command="npm test",
is_builtin=True,
created_at=datetime.now(timezone.utc),
updated_at=datetime.now(timezone.utc),
)
test_db.add_all([backend, frontend])
await test_db.commit()
# Filter for backend only
response = test_client.get(
"/api/v1/framework-presets?framework_type=backend",
headers={"X-API-Key": test_api_key.key_hash},
)
assert response.status_code == 200
data = response.json()
assert data["total"] == 1
assert data["items"][0]["framework_type"] == "backend"
async def test_list_framework_presets_filter_by_builtin(
self, test_client, test_api_key, test_db
):
"""Test filtering framework presets by is_builtin."""
# Create built-in and custom presets
builtin = FrameworkPreset(
name="godot",
display_name="Godot",
framework_type="game_engine",
test_command="godot --test",
is_builtin=True,
created_at=datetime.now(timezone.utc),
updated_at=datetime.now(timezone.utc),
)
custom = FrameworkPreset(
name="custom-framework",
display_name="Custom Framework",
framework_type="backend",
test_command="custom test",
is_builtin=False,
created_at=datetime.now(timezone.utc),
updated_at=datetime.now(timezone.utc),
)
test_db.add_all([builtin, custom])
await test_db.commit()
# Filter for custom only
response = test_client.get(
"/api/v1/framework-presets?is_builtin=false",
headers={"X-API-Key": test_api_key.key_hash},
)
assert response.status_code == 200
data = response.json()
assert data["total"] == 1
assert data["items"][0]["is_builtin"] is False
class TestCreateFrameworkPreset:
"""Test POST /api/v1/framework-presets endpoint."""
async def test_create_framework_preset_success(self, test_client, test_api_key):
"""Test successful custom framework preset creation."""
payload = {
"name": "my-custom-framework",
"display_name": "My Custom Framework",
"framework_type": "backend",
"language": "Python",
"test_command": "pytest tests/",
"build_command": "python setup.py build",
"lint_command": "flake8 .",
"format_command": "black .",
"config_files": {"pyproject": "pyproject.toml", "setup": "setup.py"},
"description": "Custom Python framework",
}
response = test_client.post(
"/api/v1/framework-presets",
json=payload,
headers={"X-API-Key": test_api_key.key_hash},
)
assert response.status_code == 201
data = response.json()
assert data["name"] == "my-custom-framework"
assert data["display_name"] == "My Custom Framework"
assert data["framework_type"] == "backend"
assert data["language"] == "Python"
assert data["test_command"] == "pytest tests/"
assert data["is_builtin"] is False # Custom presets are never built-in
assert "id" in data
async def test_create_framework_preset_duplicate_name(self, test_client, test_api_key, test_db):
"""Test creating preset with duplicate name."""
# Create existing preset
existing = FrameworkPreset(
name="existing-preset",
display_name="Existing Preset",
framework_type="backend",
test_command="pytest",
is_builtin=False,
created_at=datetime.now(timezone.utc),
updated_at=datetime.now(timezone.utc),
)
test_db.add(existing)
await test_db.commit()
payload = {
"name": "existing-preset", # Duplicate name
"display_name": "Another Preset",
"framework_type": "backend",
"test_command": "pytest",
}
response = test_client.post(
"/api/v1/framework-presets",
json=payload,
headers={"X-API-Key": test_api_key.key_hash},
)
assert response.status_code == 409
assert "already exists" in response.json()["detail"].lower()
async def test_create_framework_preset_minimal(self, test_client, test_api_key):
"""Test creating preset with minimal required fields."""
payload = {
"name": "minimal-preset",
"display_name": "Minimal Preset",
"framework_type": "language",
"test_command": "run tests",
}
response = test_client.post(
"/api/v1/framework-presets",
json=payload,
headers={"X-API-Key": test_api_key.key_hash},
)
assert response.status_code == 201
data = response.json()
assert data["name"] == "minimal-preset"
assert data["language"] is None # Optional field
assert data["build_command"] is None # Optional field
class TestGetFrameworkPreset:
"""Test GET /api/v1/framework-presets/{preset_id} endpoint."""
async def test_get_framework_preset_success(self, test_client, test_api_key, test_db):
"""Test getting single framework preset."""
preset = FrameworkPreset(
name="godot",
display_name="Godot Engine",
framework_type="game_engine",
language="GDScript",
test_command="godot --headless --test",
build_command="godot --export",
is_builtin=True,
config_files={"project": "project.godot"},
created_at=datetime.now(timezone.utc),
updated_at=datetime.now(timezone.utc),
)
test_db.add(preset)
await test_db.commit()
await test_db.refresh(preset)
response = test_client.get(
f"/api/v1/framework-presets/{preset.id}",
headers={"X-API-Key": test_api_key.key_hash},
)
assert response.status_code == 200
data = response.json()
assert data["id"] == str(preset.id)
assert data["name"] == "godot"
assert data["display_name"] == "Godot Engine"
assert data["is_builtin"] is True
assert data["config_files"]["project"] == "project.godot"
async def test_get_framework_preset_not_found(self, test_client, test_api_key):
"""Test getting non-existent framework preset."""
response = test_client.get(
f"/api/v1/framework-presets/{uuid4()}",
headers={"X-API-Key": test_api_key.key_hash},
)
assert response.status_code == 404
assert "not found" in response.json()["detail"].lower()
class TestUpdateFrameworkPreset:
"""Test PATCH /api/v1/framework-presets/{preset_id} endpoint."""
async def test_update_custom_preset_success(self, test_client, test_api_key, test_db):
"""Test successful update of custom framework preset."""
preset = FrameworkPreset(
name="custom-preset",
display_name="Custom Preset",
framework_type="backend",
test_command="pytest",
is_builtin=False, # Custom preset
created_at=datetime.now(timezone.utc),
updated_at=datetime.now(timezone.utc),
)
test_db.add(preset)
await test_db.commit()
await test_db.refresh(preset)
payload = {
"display_name": "Updated Custom Preset",
"test_command": "pytest -v",
"build_command": "python -m build",
}
response = test_client.patch(
f"/api/v1/framework-presets/{preset.id}",
json=payload,
headers={"X-API-Key": test_api_key.key_hash},
)
assert response.status_code == 200
data = response.json()
assert data["display_name"] == "Updated Custom Preset"
assert data["test_command"] == "pytest -v"
assert data["build_command"] == "python -m build"
async def test_update_builtin_preset_forbidden(self, test_client, test_api_key, test_db):
"""Test that built-in presets cannot be updated."""
preset = FrameworkPreset(
name="godot",
display_name="Godot Engine",
framework_type="game_engine",
test_command="godot --test",
is_builtin=True, # Built-in preset
created_at=datetime.now(timezone.utc),
updated_at=datetime.now(timezone.utc),
)
test_db.add(preset)
await test_db.commit()
await test_db.refresh(preset)
payload = {"display_name": "Modified Godot"}
response = test_client.patch(
f"/api/v1/framework-presets/{preset.id}",
json=payload,
headers={"X-API-Key": test_api_key.key_hash},
)
assert response.status_code == 409
assert "built-in" in response.json()["detail"].lower()
async def test_update_framework_preset_not_found(self, test_client, test_api_key):
"""Test updating non-existent framework preset."""
payload = {"display_name": "Updated"}
response = test_client.patch(
f"/api/v1/framework-presets/{uuid4()}",
json=payload,
headers={"X-API-Key": test_api_key.key_hash},
)
assert response.status_code == 404
class TestDeleteFrameworkPreset:
"""Test DELETE /api/v1/framework-presets/{preset_id} endpoint."""
async def test_delete_custom_preset_success(self, test_client, test_api_key, test_db):
"""Test successful deletion of custom framework preset."""
preset = FrameworkPreset(
name="custom-to-delete",
display_name="Custom To Delete",
framework_type="backend",
test_command="pytest",
is_builtin=False, # Custom preset
created_at=datetime.now(timezone.utc),
updated_at=datetime.now(timezone.utc),
)
test_db.add(preset)
await test_db.commit()
await test_db.refresh(preset)
preset_id = preset.id
response = test_client.delete(
f"/api/v1/framework-presets/{preset_id}",
headers={"X-API-Key": test_api_key.key_hash},
)
assert response.status_code == 204
# Verify deleted
get_response = test_client.get(
f"/api/v1/framework-presets/{preset_id}",
headers={"X-API-Key": test_api_key.key_hash},
)
assert get_response.status_code == 404
async def test_delete_builtin_preset_forbidden(self, test_client, test_api_key, test_db):
"""Test that built-in presets cannot be deleted."""
preset = FrameworkPreset(
name="django",
display_name="Django",
framework_type="backend",
test_command="pytest",
is_builtin=True, # Built-in preset
created_at=datetime.now(timezone.utc),
updated_at=datetime.now(timezone.utc),
)
test_db.add(preset)
await test_db.commit()
await test_db.refresh(preset)
response = test_client.delete(
f"/api/v1/framework-presets/{preset.id}",
headers={"X-API-Key": test_api_key.key_hash},
)
assert response.status_code == 409
assert "built-in" in response.json()["detail"].lower()
async def test_delete_framework_preset_not_found(self, test_client, test_api_key):
"""Test deleting non-existent framework preset."""
response = test_client.delete(
f"/api/v1/framework-presets/{uuid4()}",
headers={"X-API-Key": test_api_key.key_hash},
)
assert response.status_code == 404
| """Integration tests for FrameworkPresets API endpoints.
Tests all 5 FrameworkPreset CRUD endpoints with built-in protection.
"""
from datetime import datetime, timezone
from uuid import uuid4
import pytest
from fastapi.testclient import TestClient
from lazy_bird.api.main import app
from lazy_bird.models.framework_preset import FrameworkPreset
class TestListFrameworkPresets:
"""Test GET /api/v1/framework-presets endpoint."""
async def test_list_framework_presets_empty(self, test_client, test_api_key):
"""Test listing framework presets when none exist."""
response = test_client.get(
"/api/v1/framework-presets",
headers={"X-API-Key": test_api_key.key_hash},
)
assert response.status_code == 200
data = response.json()
assert data["total"] == 0
assert data["items"] == []
async def test_list_framework_presets_pagination(self, test_client, test_api_key, test_db):
"""Test framework presets pagination."""
# Create 5 presets
for i in range(5):
preset = FrameworkPreset(
name=f"preset-{i}",
display_name=f"Preset {i}",
framework_type="backend",
test_command=f"pytest test-{ | [
"# yusufkaraaslan/lazy-bird:lazy_bird/models/framework_preset.py\nFrameworkPreset"
] | yusufkaraaslan/lazy-bird | tests/integration/test_framework_presets_api.py |
"""End-to-end integration tests for complete workflows (Issue #116).
Tests full workflows from queue to completion:
- Queue task → Execute → Create PR
- Failed task retry
- Task cancellation
- Webhook delivery
- Real-time log streaming
"""
import asyncio
import json
from datetime import datetime, timezone
from pathlib import Path
from unittest.mock import AsyncMock, Mock, patch
from uuid import UUID, uuid4
import pytest
class TestQueueToExecutionToPR:
"""Test complete workflow: Queue → Execute → Create PR."""
@pytest.mark.asyncio
async def test_full_workflow_success(self):
"""Test successful end-to-end workflow."""
from lazy_bird.tasks.task_executor import _execute_task_async
task_run_id = str(uuid4())
# Mock TaskRun
mock_task_run = Mock()
mock_task_run.id = uuid4()
mock_task_run.project_id = uuid4()
mock_task_run.work_item_id = "issue-42"
mock_task_run.work_item_title = "Add health system"
mock_task_run.task_type = "feature"
mock_task_run.complexity = "medium"
mock_task_run.status = "queued"
# Mock database
class MockResult:
def scalar_one_or_none(self):
return mock_task_run
async def mock_db_generator():
mock_db = AsyncMock()
mock_db.execute = AsyncMock(return_value=MockResult())
mock_db.commit = AsyncMock()
yield mock_db
# Mock LogPublisher
mock_log_publisher = AsyncMock()
with patch("lazy_bird.tasks.task_executor.get_async_db") as mock_get_db:
mock_get_db.return_value = mock_db_generator()
with patch("lazy_bird.tasks.task_executor.LogPublisher") as mock_lp:
mock_lp.return_value = mock_log_publisher
# Execute workflow
result = await _execute_task_async(task_run_id)
# Verify success
assert result["success"] is True
assert result["status"] == "completed"
# Verify logs were published at each step
log_calls = mock_log_publisher.publish_log_async.call_args_list
assert len(log_calls) >= 7 # Start + 6 steps + completion
# Verify status was updated
assert mock_task_run.status == "completed"
assert mock_task_run.started_at is not None
assert mock_task_run.completed_at is not None
class TestFailedTaskRetry:
"""Test failed task retry workflow."""
@pytest.mark.asyncio
async def test_task_failure_triggers_retry(self):
"""Test that failed task triggers retry mechanism."""
from lazy_bird.tasks.task_executor import _execute_task_async
task_run_id = str(uuid4())
# Mock database that raises error
mock_db = AsyncMock()
mock_db.execute = AsyncMock(side_effect=RuntimeError("Test execution failed"))
async def mock_db_generator():
yield mock_db
# Mock LogPublisher
mock_log_publisher = AsyncMock()
with patch("lazy_bird.tasks.task_executor.get_async_db") as mock_get_db:
mock_get_db.return_value = mock_db_generator()
with patch("lazy_bird.tasks.task_executor.LogPublisher") as mock_lp:
mock_lp.return_value = mock_log_publisher
# Execute task (should fail)
result = await _execute_task_async(task_run_id)
# Verify failure
assert result["success"] is False
assert "Test execution failed" in result["error"]
# Verify error log was published
error_logs = [
call
for call in mock_log_publisher.publish_log_async.call_args_list
if call[1].get("level") == "ERROR"
]
assert len(error_logs) > 0
assert "Task execution failed" in error_logs[0][1]["message"]
def test_retry_backoff_calculation(self):
"""Test retry backoff is calculated correctly."""
# Exponential backoff: attempt * base_delay
base_delay = 30 # seconds
max_retries = 3
for attempt in range(1, max_retries + 1):
backoff = attempt * base_delay
expected = {1: 30, 2: 60, 3: 90}
assert backoff == expected[attempt]
class TestTaskCancellation:
"""Test task cancellation workflow."""
@pytest.mark.skip(reason="Task cancellation endpoint not yet implemented - TODO for Phase 3")
@pytest.mark.asyncio
async def test_task_cancellation_updates_status(self):
"""Test cancelling a task updates its status correctly."""
from lazy_bird.api.routers.task_runs import cancel_task_run
from lazy_bird.models.task_run import TaskRun
from sqlalchemy import select
task_run_id = uuid4()
# Mock task run in running state
mock_task_run = Mock(spec=TaskRun)
mock_task_run.id = task_run_id
mock_task_run.project_id = uuid4()
mock_task_run.status = "running"
mock_task_run.started_at = datetime.now(timezone.utc)
mock_task_run.completed_at = None
# Mock database
class MockResult:
def scalar_one_or_none(self):
return mock_task_run
mock_db = AsyncMock()
mock_db.execute = AsyncMock(return_value=MockResult())
mock_db.commit = AsyncMock()
# Mock API key
mock_api_key = Mock()
mock_api_key.project_id = None # Admin key
# Cancel task
result = await cancel_task_run(
task_run_id=task_run_id,
db=mock_db,
api_key=mock_api_key,
)
# Verify status updated
assert mock_task_run.status == "cancelled"
assert mock_task_run.completed_at is not None
assert mock_db.commit.called
@pytest.mark.skip(reason="Task cancellation endpoint not yet implemented - TODO for Phase 3")
@pytest.mark.asyncio
async def test_cannot_cancel_completed_task(self):
"""Test that completed tasks cannot be cancelled."""
from lazy_bird.api.routers.task_runs import cancel_task_run
from lazy_bird.api.exceptions import InvalidRequestError
task_run_id = uuid4()
# Mock completed task run
mock_task_run = Mock()
mock_task_run.id = task_run_id
mock_task_run.project_id = uuid4()
mock_task_run.status = "completed"
# Mock database
class MockResult:
def scalar_one_or_none(self):
return mock_task_run
mock_db = AsyncMock()
mock_db.execute = AsyncMock(return_value=MockResult())
# Mock API key
mock_api_key = Mock()
mock_api_key.project_id = None
# Try to cancel (should fail)
with pytest.raises(InvalidRequestError) as exc_info:
await cancel_task_run(
task_run_id=task_run_id,
db=mock_db,
api_key=mock_api_key,
)
assert "Cannot cancel task" in str(exc_info.value)
class TestWebhookDelivery:
"""Test webhook delivery workflow."""
@pytest.mark.skip(reason="WebhookService full implementation pending - TODO for Phase 3")
@pytest.mark.asyncio
async def test_webhook_triggers_on_task_status_change(self):
"""Test webhook is triggered when task status changes."""
from lazy_bird.services.webhook_service import WebhookService
# Create webhook service
service = WebhookService()
# Mock webhook endpoint
mock_webhook = Mock()
mock_webhook.id = uuid4()
mock_webhook.url = "https://example.com/webhook"
mock_webhook.secret = "test-secret"
mock_webhook.events = ["task.started", "task.completed"]
mock_webhook.is_active = True
# Mock HTTP client
with patch("httpx.AsyncClient") as mock_client:
mock_response = Mock()
mock_response.status_code = 200
mock_response.text = "OK"
mock_http = AsyncMock()
mock_http.post = AsyncMock(return_value=mock_response)
mock_client.return_value.__aenter__.return_value = mock_http
# Trigger webhook
result = await service.deliver_webhook_async(
webhook=mock_webhook,
event_type="task.started",
payload={
"task_id": str(uuid4()),
"status": "running",
"timestamp": datetime.now(timezone.utc).isoformat(),
},
)
# Verify webhook was called
assert result is True
assert mock_http.post.called
# Verify payload structure
call_args = mock_http.post.call_args
assert call_args[0][0] == "https://example.com/webhook"
payload = json.loads(call_args[1]["content"])
assert payload["event"] == "task.started"
assert "task_id" in payload["data"]
@pytest.mark.skip(reason="WebhookService full implementation pending - TODO for Phase 3")
@pytest.mark.asyncio
async def test_webhook_delivery_includes_signature(self):
"""Test webhook delivery includes HMAC signature."""
from lazy_bird.services.webhook_service import WebhookService
import hashlib
import hmac
service = WebhookService()
# Mock webhook with secret
mock_webhook = Mock()
mock_webhook.id = uuid4()
mock_webhook.url = "https://example.com/webhook"
mock_webhook.secret = "my-secret-key"
mock_webhook.events = ["task.completed"]
mock_webhook.is_active = True
payload = {
"task_id": str(uuid4()),
"status": "completed",
}
with patch("httpx.AsyncClient") as mock_client:
mock_response = Mock()
mock_response.status_code = 200
mock_http = AsyncMock()
mock_http.post = AsyncMock(return_value=mock_response)
mock_client.return_value.__aenter__.return_value = mock_http
await service.deliver_webhook_async(
webhook=mock_webhook,
event_type="task.completed",
payload=payload,
)
# Verify signature header
call_args = mock_http.post.call_args
headers = call_args[1]["headers"]
assert "X-Lazy-Bird-Signature" in headers
class TestRealTimeLogStreaming:
"""Test real-time log streaming workflow."""
@pytest.mark.asyncio
async def test_log_publishing_to_redis(self):
"""Test logs are published to Redis for SSE streaming."""
from lazy_bird.services.log_publisher import LogPublisher
publisher = LogPublisher(use_async=True)
# Mock Redis client
with patch("lazy_bird.services.log_publisher.get_async_redis") as mock_get_redis:
mock_redis = AsyncMock()
mock_redis.publish = AsyncMock(return_value=1) # 1 subscriber
mock_redis.lpush = AsyncMock()
mock_redis.ltrim = AsyncMock()
mock_redis.expire = AsyncMock()
mock_get_redis.return_value = mock_redis
# Publish log
result = await publisher.publish_log_async(
message="Task started",
level="INFO",
task_id="task-123",
project_id="project-456",
metadata={"step": "init"},
)
# Verify published
assert result is True
assert mock_redis.publish.called
# Verify channel
call_args = mock_redis.publish.call_args
channel = call_args[0][0]
assert channel == "lazy_bird:logs:task:task-123"
# Verify message structure
message = json.loads(call_args[0][1])
assert message["message"] == "Task started"
assert message["level"] == "INFO"
assert message["task_id"] == "task-123"
assert message["metadata"]["step"] == "init"
@pytest.mark.asyncio
async def test_sse_streams_logs_to_clients(self):
"""Test SSE endpoint streams logs to connected clients."""
from lazy_bird.api.routers.task_runs import stream_task_run_logs
task_run_id = uuid4()
# Mock task run
class MockTaskRun:
id = task_run_id
project_id = uuid4()
work_item_id = "issue-42"
status = "running"
# Mock database
class MockResult:
def scalar_one_or_none(self):
return MockTaskRun()
class MockDB:
async def execute(self, query):
return MockResult()
mock_db = MockDB()
# Mock API key
mock_api_key = Mock()
mock_api_key.project_id = None
# Mock Redis with test logs
with patch("lazy_bird.api.routers.task_runs.get_async_redis") as mock_get_redis:
with patch("lazy_bird.api.routers.task_runs.LogPublisher") as mock_log_pub:
# Mock log history
mock_publisher = AsyncMock()
mock_publisher.get_log_history_async = AsyncMock(
return_value=[
{
"timestamp": datetime.now(timezone.utc).isoformat(),
"level": "INFO",
"message": "Task started",
}
]
)
mock_log_pub.return_value = mock_publisher
# Mock Redis Pub/Sub
mock_redis = AsyncMock()
mock_pubsub = AsyncMock()
mock_pubsub.subscribe = AsyncMock()
mock_pubsub.get_message = AsyncMock(return_value=None)
mock_redis.pubsub.return_value = mock_pubsub
mock_get_redis.return_value = mock_redis
# Stream logs (will timeout after no messages)
try:
response = await stream_task_run_logs(
task_run_id=task_run_id,
level=None,
search=None,
since=None,
db=mock_db,
api_key=mock_api_key,
)
# Verify SSE response
from fastapi.responses import StreamingResponse
assert isinstance(response, StreamingResponse)
assert response.media_type == "text/event-stream"
except Exception:
# If Redis is not available, test structure is still validated
pass
class TestWorkflowIntegrationPoints:
"""Test integration between workflow components."""
@pytest.mark.asyncio
async def test_task_executor_triggers_webhooks(self):
"""Test task executor triggers webhooks on status changes."""
# This tests the integration point between executor and webhooks
from lazy_bird.tasks.task_executor import _execute_task_async
task_run_id = str(uuid4())
# Mock everything to isolate webhook triggering
mock_task_run = Mock()
mock_task_run.id = uuid4()
mock_task_run.project_id = uuid4()
mock_task_run.work_item_id = "issue-42"
mock_task_run.work_item_title = "Test task"
mock_task_run.task_type = "feature"
mock_task_run.complexity = "simple"
mock_task_run.status = "queued"
class MockResult:
def scalar_one_or_none(self):
return mock_task_run
async def mock_db_generator():
mock_db = AsyncMock()
mock_db.execute = AsyncMock(return_value=MockResult())
mock_db.commit = AsyncMock()
yield mock_db
mock_log_publisher = AsyncMock()
with patch("lazy_bird.tasks.task_executor.get_async_db") as mock_get_db:
mock_get_db.return_value = mock_db_generator()
with patch("lazy_bird.tasks.task_executor.LogPublisher") as mock_lp:
mock_lp.return_value = mock_log_publisher
# Execute task
result = await _execute_task_async(task_run_id)
# TODO: Once webhooks are integrated into task_executor,
# verify they were triggered here
# For now, verify task execution completed
assert result["success"] is True
__all__ = [
"TestQueueToExecutionToPR",
"TestFailedTaskRetry",
"TestTaskCancellation",
"TestWebhookDelivery",
"TestRealTimeLogStreaming",
"TestWorkflowIntegrationPoints",
]
| """End-to-end integration tests for complete workflows (Issue #116).
Tests full workflows from queue to completion:
- Queue task → Execute → Create PR
- Failed task retry
- Task cancellation
- Webhook delivery
- Real-time log streaming
"""
import asyncio
import json
from datetime import datetime, timezone
from pathlib import Path
from unittest.mock import AsyncMock, Mock, patch
from uuid import UUID, uuid4
import pytest
class TestQueueToExecutionToPR:
"""Test complete workflow: Queue → Execute → Create PR."""
@pytest.mark.asyncio
async def test_full_workflow_success(self):
"""Test successful end-to-end workflow."""
from lazy_bird.tasks.task_executor import _execute_task_async
task_run_id = str(uuid4())
# Mock TaskRun
mock_task_run = Mock()
mock_task_run.id = uuid4()
mock_task_run.project_id = uuid4()
mock_task_run.work_item_id = "issue-42"
mock_task_run.work_item_title = "Add health system"
mock_task_run.task_type = "feature"
mock_task_run.complexity = "medium"
mock_task_run.status = "queued"
# Mock database
class MockResult:
def scalar_one_or_none(self):
return mock_task_run
async def mock_db_ | [] | yusufkaraaslan/lazy-bird | tests/integration/test_e2e_workflows.py |
"""Integration tests for ClaudeAccounts API endpoints.
Tests all 5 ClaudeAccount CRUD endpoints with both API and subscription modes.
"""
from datetime import datetime, timezone
from decimal import Decimal
from uuid import uuid4
import pytest
from fastapi.testclient import TestClient
from lazy_bird.api.main import app
from lazy_bird.models.claude_account import ClaudeAccount
class TestListClaudeAccounts:
"""Test GET /api/v1/claude-accounts endpoint."""
async def test_list_claude_accounts_empty(self, test_client, test_api_key):
"""Test listing Claude accounts when none exist."""
response = test_client.get(
"/api/v1/claude-accounts",
headers={"X-API-Key": test_api_key.key_hash},
)
assert response.status_code == 200
data = response.json()
assert data["total"] == 0
assert data["items"] == []
async def test_list_claude_accounts_pagination(self, test_client, test_api_key, test_db):
"""Test Claude accounts pagination."""
# Create 5 accounts
for i in range(5):
account = ClaudeAccount(
name=f"Account {i}",
account_type="api",
api_key=f"sk-ant-test-key-{i}",
model="claude-sonnet-4-5",
created_at=datetime.now(timezone.utc),
updated_at=datetime.now(timezone.utc),
)
test_db.add(account)
await test_db.commit()
# Get page 1 with page_size=2
response = test_client.get(
"/api/v1/claude-accounts?page=1&page_size=2",
headers={"X-API-Key": test_api_key.key_hash},
)
assert response.status_code == 200
data = response.json()
assert data["total"] == 5
assert len(data["items"]) == 2
assert data["pages"] == 3
async def test_list_claude_accounts_filter_by_type(self, test_client, test_api_key, test_db):
"""Test filtering Claude accounts by account_type."""
# Create API and subscription accounts
api_account = ClaudeAccount(
name="API Account",
account_type="api",
api_key="sk-ant-test-key-api",
model="claude-sonnet-4-5",
created_at=datetime.now(timezone.utc),
updated_at=datetime.now(timezone.utc),
)
sub_account = ClaudeAccount(
name="Subscription Account",
account_type="subscription",
config_directory="/home/user/.claude",
model="claude-sonnet-4-5",
created_at=datetime.now(timezone.utc),
updated_at=datetime.now(timezone.utc),
)
test_db.add_all([api_account, sub_account])
await test_db.commit()
# Filter for API accounts only
response = test_client.get(
"/api/v1/claude-accounts?account_type=api",
headers={"X-API-Key": test_api_key.key_hash},
)
assert response.status_code == 200
data = response.json()
assert data["total"] == 1
assert data["items"][0]["account_type"] == "api"
class TestCreateClaudeAccount:
"""Test POST /api/v1/claude-accounts endpoint."""
async def test_create_api_account_success(self, test_client, test_api_key):
"""Test successful API account creation."""
payload = {
"name": "Production API Account",
"account_type": "api",
"api_key": "sk-ant-api-test-key-123456",
"model": "claude-sonnet-4-5",
"max_tokens": 8000,
"temperature": 0.7,
"monthly_budget_usd": 500.0,
}
response = test_client.post(
"/api/v1/claude-accounts",
json=payload,
headers={"X-API-Key": test_api_key.key_hash},
)
assert response.status_code == 201
data = response.json()
assert data["name"] == "Production API Account"
assert data["account_type"] == "api"
assert data["model"] == "claude-sonnet-4-5"
assert data["max_tokens"] == 8000
assert Decimal(data["temperature"]) == Decimal("0.7")
assert "id" in data
# API key should not be returned
assert "api_key" not in data
async def test_create_subscription_account_success(self, test_client, test_api_key):
"""Test successful subscription account creation."""
payload = {
"name": "Personal Subscription",
"account_type": "subscription",
"config_directory": "/home/user/.config/claude",
"session_token": "session-token-test-123",
"model": "claude-sonnet-4-5",
}
response = test_client.post(
"/api/v1/claude-accounts",
json=payload,
headers={"X-API-Key": test_api_key.key_hash},
)
assert response.status_code == 201
data = response.json()
assert data["name"] == "Personal Subscription"
assert data["account_type"] == "subscription"
assert data["config_directory"] == "/home/user/.config/claude"
# Session token should not be returned
assert "session_token" not in data
async def test_create_account_missing_api_key_for_api_type(self, test_client, test_api_key):
"""Test creating API account without api_key."""
payload = {
"name": "Incomplete API Account",
"account_type": "api",
"model": "claude-sonnet-4-5",
# api_key missing
}
response = test_client.post(
"/api/v1/claude-accounts",
json=payload,
headers={"X-API-Key": test_api_key.key_hash},
)
assert response.status_code == 422 # Validation error
async def test_create_account_missing_config_directory_for_subscription(
self, test_client, test_api_key
):
"""Test creating subscription account without config_directory."""
payload = {
"name": "Incomplete Subscription Account",
"account_type": "subscription",
"model": "claude-sonnet-4-5",
# config_directory missing
}
response = test_client.post(
"/api/v1/claude-accounts",
json=payload,
headers={"X-API-Key": test_api_key.key_hash},
)
assert response.status_code == 422 # Validation error
class TestGetClaudeAccount:
"""Test GET /api/v1/claude-accounts/{account_id} endpoint."""
async def test_get_claude_account_success(self, test_client, test_api_key, test_db):
"""Test getting single Claude account."""
account = ClaudeAccount(
name="Test Account",
account_type="api",
api_key="sk-ant-test-key",
model="claude-sonnet-4-5",
max_tokens=8000,
temperature=Decimal("0.7"),
created_at=datetime.now(timezone.utc),
updated_at=datetime.now(timezone.utc),
)
test_db.add(account)
await test_db.commit()
await test_db.refresh(account)
response = test_client.get(
f"/api/v1/claude-accounts/{account.id}",
headers={"X-API-Key": test_api_key.key_hash},
)
assert response.status_code == 200
data = response.json()
assert data["id"] == str(account.id)
assert data["name"] == "Test Account"
assert data["account_type"] == "api"
async def test_get_claude_account_not_found(self, test_client, test_api_key):
"""Test getting non-existent Claude account."""
response = test_client.get(
f"/api/v1/claude-accounts/{uuid4()}",
headers={"X-API-Key": test_api_key.key_hash},
)
assert response.status_code == 404
assert "not found" in response.json()["detail"].lower()
class TestUpdateClaudeAccount:
"""Test PATCH /api/v1/claude-accounts/{account_id} endpoint."""
async def test_update_claude_account_success(self, test_client, test_api_key, test_db):
"""Test successful Claude account update."""
account = ClaudeAccount(
name="Original Name",
account_type="api",
api_key="sk-ant-test-key",
model="claude-sonnet-4-5",
max_tokens=8000,
created_at=datetime.now(timezone.utc),
updated_at=datetime.now(timezone.utc),
)
test_db.add(account)
await test_db.commit()
await test_db.refresh(account)
payload = {
"name": "Updated Name",
"max_tokens": 16000,
"temperature": 0.8,
}
response = test_client.patch(
f"/api/v1/claude-accounts/{account.id}",
json=payload,
headers={"X-API-Key": test_api_key.key_hash},
)
assert response.status_code == 200
data = response.json()
assert data["name"] == "Updated Name"
assert data["max_tokens"] == 16000
assert Decimal(data["temperature"]) == Decimal("0.8")
async def test_update_claude_account_api_key(self, test_client, test_api_key, test_db):
"""Test updating API key (encryption should work)."""
account = ClaudeAccount(
name="Test Account",
account_type="api",
api_key="sk-ant-old-key",
model="claude-sonnet-4-5",
created_at=datetime.now(timezone.utc),
updated_at=datetime.now(timezone.utc),
)
test_db.add(account)
await test_db.commit()
await test_db.refresh(account)
payload = {"api_key": "sk-ant-new-key-updated"}
response = test_client.patch(
f"/api/v1/claude-accounts/{account.id}",
json=payload,
headers={"X-API-Key": test_api_key.key_hash},
)
assert response.status_code == 200
# API key should not be in response
data = response.json()
assert "api_key" not in data
async def test_update_claude_account_not_found(self, test_client, test_api_key):
"""Test updating non-existent Claude account."""
payload = {"name": "Updated Name"}
response = test_client.patch(
f"/api/v1/claude-accounts/{uuid4()}",
json=payload,
headers={"X-API-Key": test_api_key.key_hash},
)
assert response.status_code == 404
class TestDeleteClaudeAccount:
"""Test DELETE /api/v1/claude-accounts/{account_id} endpoint."""
async def test_delete_claude_account_success(self, test_client, test_api_key, test_db):
"""Test successful Claude account deletion."""
account = ClaudeAccount(
name="To Delete",
account_type="api",
api_key="sk-ant-test-key",
model="claude-sonnet-4-5",
created_at=datetime.now(timezone.utc),
updated_at=datetime.now(timezone.utc),
)
test_db.add(account)
await test_db.commit()
await test_db.refresh(account)
account_id = account.id
response = test_client.delete(
f"/api/v1/claude-accounts/{account_id}",
headers={"X-API-Key": test_api_key.key_hash},
)
assert response.status_code == 204
# Verify deleted
get_response = test_client.get(
f"/api/v1/claude-accounts/{account_id}",
headers={"X-API-Key": test_api_key.key_hash},
)
assert get_response.status_code == 404
async def test_delete_claude_account_not_found(self, test_client, test_api_key):
"""Test deleting non-existent Claude account."""
response = test_client.delete(
f"/api/v1/claude-accounts/{uuid4()}",
headers={"X-API-Key": test_api_key.key_hash},
)
assert response.status_code == 404
| """Integration tests for ClaudeAccounts API endpoints.
Tests all 5 ClaudeAccount CRUD endpoints with both API and subscription modes.
"""
from datetime import datetime, timezone
from decimal import Decimal
from uuid import uuid4
import pytest
from fastapi.testclient import TestClient
from lazy_bird.api.main import app
from lazy_bird.models.claude_account import ClaudeAccount
class TestListClaudeAccounts:
"""Test GET /api/v1/claude-accounts endpoint."""
async def test_list_claude_accounts_empty(self, test_client, test_api_key):
"""Test listing Claude accounts when none exist."""
response = test_client.get(
"/api/v1/claude-accounts",
headers={"X-API-Key": test_api_key.key_hash},
)
assert response.status_code == 200
data = response.json()
assert data["total"] == 0
assert data["items"] == []
async def test_list_claude_accounts_pagination(self, test_client, test_api_key, test_db):
"""Test Claude accounts pagination."""
# Create 5 accounts
for i in range(5):
account = ClaudeAccount(
name=f"Account {i}",
account_type="api",
api_key=f"sk-ant-test-key-{i}",
| [
"# yusufkaraaslan/lazy-bird:lazy_bird/models/claude_account.py\nClaudeAccount"
] | yusufkaraaslan/lazy-bird | tests/integration/test_claude_accounts_api.py |
"""Integration tests for Auth API endpoints.
Tests registration, login, token refresh, and duplicate email handling.
"""
import pytest
from fastapi.testclient import TestClient
from lazy_bird.core.security import hash_password
class TestRegister:
"""Test POST /api/v1/auth/register endpoint."""
async def test_register_success(self, test_client):
"""Test successful user registration."""
response = test_client.post(
"/api/v1/auth/register",
json={
"email": "newuser@example.com",
"password": "securepassword123",
"display_name": "New User",
},
)
assert response.status_code == 201
data = response.json()
assert data["email"] == "newuser@example.com"
assert data["display_name"] == "New User"
assert data["role"] == "user"
assert data["is_active"] is True
assert "id" in data
assert "created_at" in data
# Password should never be in the response
assert "password" not in data
assert "password_hash" not in data
async def test_register_without_display_name(self, test_client):
"""Test registration without optional display_name."""
response = test_client.post(
"/api/v1/auth/register",
json={
"email": "nodisplay@example.com",
"password": "securepassword123",
},
)
assert response.status_code == 201
data = response.json()
assert data["email"] == "nodisplay@example.com"
assert data["display_name"] is None
async def test_register_duplicate_email(self, test_client):
"""Test registration with already registered email returns 409."""
user_data = {
"email": "duplicate@example.com",
"password": "securepassword123",
"display_name": "First User",
}
# Register first user
response1 = test_client.post("/api/v1/auth/register", json=user_data)
assert response1.status_code == 201
# Try to register with same email
response2 = test_client.post("/api/v1/auth/register", json=user_data)
assert response2.status_code == 409
async def test_register_invalid_email(self, test_client):
"""Test registration with invalid email returns 422."""
response = test_client.post(
"/api/v1/auth/register",
json={
"email": "not-an-email",
"password": "securepassword123",
},
)
assert response.status_code == 422
async def test_register_short_password(self, test_client):
"""Test registration with too short password returns 422."""
response = test_client.post(
"/api/v1/auth/register",
json={
"email": "short@example.com",
"password": "short",
},
)
assert response.status_code == 422
class TestLogin:
"""Test POST /api/v1/auth/login endpoint."""
async def test_login_success(self, test_client):
"""Test successful login returns tokens."""
# Register user first
test_client.post(
"/api/v1/auth/register",
json={
"email": "loginuser@example.com",
"password": "securepassword123",
},
)
# Login
response = test_client.post(
"/api/v1/auth/login",
json={
"email": "loginuser@example.com",
"password": "securepassword123",
},
)
assert response.status_code == 200
data = response.json()
assert "access_token" in data
assert "refresh_token" in data
assert data["token_type"] == "bearer"
# Tokens should be non-empty JWT strings
assert len(data["access_token"]) > 0
assert len(data["refresh_token"]) > 0
assert "." in data["access_token"] # JWT has dots
assert "." in data["refresh_token"]
async def test_login_wrong_password(self, test_client):
"""Test login with wrong password returns 401."""
# Register user first
test_client.post(
"/api/v1/auth/register",
json={
"email": "wrongpw@example.com",
"password": "securepassword123",
},
)
# Login with wrong password
response = test_client.post(
"/api/v1/auth/login",
json={
"email": "wrongpw@example.com",
"password": "wrongpassword",
},
)
assert response.status_code == 401
async def test_login_nonexistent_user(self, test_client):
"""Test login with non-existent email returns 401."""
response = test_client.post(
"/api/v1/auth/login",
json={
"email": "nonexistent@example.com",
"password": "securepassword123",
},
)
assert response.status_code == 401
async def test_login_inactive_user(self, test_client, test_db):
"""Test login with inactive user returns 401."""
from lazy_bird.models.user import User
from datetime import datetime, timezone
# Create inactive user directly in DB
user = User(
email="inactive@example.com",
password_hash=hash_password("securepassword123"),
display_name="Inactive User",
role="user",
is_active=False,
created_at=datetime.now(timezone.utc),
)
test_db.add(user)
await test_db.commit()
# Try to login
response = test_client.post(
"/api/v1/auth/login",
json={
"email": "inactive@example.com",
"password": "securepassword123",
},
)
assert response.status_code == 401
class TestRefresh:
"""Test POST /api/v1/auth/refresh endpoint."""
async def test_refresh_success(self, test_client):
"""Test successful token refresh."""
# Register and login
test_client.post(
"/api/v1/auth/register",
json={
"email": "refreshuser@example.com",
"password": "securepassword123",
},
)
login_response = test_client.post(
"/api/v1/auth/login",
json={
"email": "refreshuser@example.com",
"password": "securepassword123",
},
)
login_data = login_response.json()
refresh_token = login_data["refresh_token"]
# Refresh token
response = test_client.post(
"/api/v1/auth/refresh",
json={"refresh_token": refresh_token},
)
assert response.status_code == 200
data = response.json()
assert "access_token" in data
assert "refresh_token" in data
assert data["token_type"] == "bearer"
async def test_refresh_invalid_token(self, test_client):
"""Test refresh with invalid token returns 401."""
response = test_client.post(
"/api/v1/auth/refresh",
json={"refresh_token": "invalid.token.here"},
)
assert response.status_code == 401
async def test_refresh_with_access_token(self, test_client):
"""Test refresh with access token (not refresh) returns 401."""
# Register and login
test_client.post(
"/api/v1/auth/register",
json={
"email": "accesstoken@example.com",
"password": "securepassword123",
},
)
login_response = test_client.post(
"/api/v1/auth/login",
json={
"email": "accesstoken@example.com",
"password": "securepassword123",
},
)
login_data = login_response.json()
access_token = login_data["access_token"]
# Try to use access token as refresh token
response = test_client.post(
"/api/v1/auth/refresh",
json={"refresh_token": access_token},
)
assert response.status_code == 401
class TestLogout:
"""Test POST /api/v1/auth/logout endpoint."""
async def test_logout(self, test_client):
"""Test logout returns success message."""
response = test_client.post("/api/v1/auth/logout")
assert response.status_code == 200
data = response.json()
assert "message" in data
| """Integration tests for Auth API endpoints.
Tests registration, login, token refresh, and duplicate email handling.
"""
import pytest
from fastapi.testclient import TestClient
from lazy_bird.core.security import hash_password
class TestRegister:
"""Test POST /api/v1/auth/register endpoint."""
async def test_register_success(self, test_client):
"""Test successful user registration."""
response = test_client.post(
"/api/v1/auth/register",
json={
"email": "newuser@example.com",
"password": "securepassword123",
"display_name": "New User",
},
)
assert response.status_code == 201
data = response.json()
assert data["email"] == "newuser@example.com"
assert data["display_name"] == "New User"
assert data["role"] == "user"
assert data["is_active"] is True
assert "id" in data
assert "created_at" in data
# Password should never be in the response
assert "password" not in data
assert "password_hash" not in data
async def test_register_without_display_name(self, test_client):
"""Test registration without optional display_name."""
response = test_client.post(
| [
"# yusufkaraaslan/lazy-bird:lazy_bird/core/security.py\nhash_password"
] | yusufkaraaslan/lazy-bird | tests/integration/test_auth_api.py |
"""Integration tests for API Keys API endpoints.
Tests all 5 API Key CRUD endpoints with security focus.
"""
from datetime import datetime, timedelta, timezone
from uuid import uuid4
import pytest
from fastapi.testclient import TestClient
from lazy_bird.api.main import app
from lazy_bird.models.api_key import ApiKey
from lazy_bird.models.project import Project
class TestListAPIKeys:
"""Test GET /api/v1/api-keys endpoint."""
async def test_list_api_keys_empty(self, test_client, test_api_key):
"""Test listing API keys when none exist (except the test key)."""
response = test_client.get(
"/api/v1/api-keys",
headers={"X-API-Key": test_api_key.key_hash},
)
assert response.status_code == 200
data = response.json()
# At least the test API key should exist
assert data["total"] >= 1
async def test_list_api_keys_pagination(self, test_client, test_api_key, test_db):
"""Test API keys pagination."""
# Create 5 API keys
for i in range(5):
api_key = ApiKey(
key_hash=f"hash-{i}",
key_prefix=f"lb_test{i}",
name=f"Test Key {i}",
scopes=["read"],
is_active=True,
created_at=datetime.now(timezone.utc),
)
test_db.add(api_key)
await test_db.commit()
# Get page 1 with page_size=2
response = test_client.get(
"/api/v1/api-keys?page=1&page_size=2",
headers={"X-API-Key": test_api_key.key_hash},
)
assert response.status_code == 200
data = response.json()
assert len(data["items"]) == 2
assert data["page"] == 1
async def test_list_api_keys_filter_by_scope(self, test_client, test_api_key, test_db):
"""Test filtering API keys by scope."""
# Create keys with different scopes
read_key = ApiKey(
key_hash="hash-read",
key_prefix="lb_read1",
name="Read Key",
scopes=["read"],
is_active=True,
created_at=datetime.now(timezone.utc),
)
admin_key = ApiKey(
key_hash="hash-admin",
key_prefix="lb_admin",
name="Admin Key",
scopes=["read", "write", "admin"],
is_active=True,
created_at=datetime.now(timezone.utc),
)
test_db.add_all([read_key, admin_key])
await test_db.commit()
# Filter for admin scope
response = test_client.get(
"/api/v1/api-keys?scope=admin",
headers={"X-API-Key": test_api_key.key_hash},
)
assert response.status_code == 200
data = response.json()
assert data["total"] >= 1
# All returned items should have admin scope
for item in data["items"]:
assert "admin" in item["scopes"]
class TestCreateAPIKey:
"""Test POST /api/v1/api-keys endpoint."""
async def test_create_api_key_success(self, test_client, test_api_key):
"""Test successful API key creation."""
payload = {
"name": "Production API Key",
"scopes": ["read", "write"],
"expires_at": (datetime.now(timezone.utc) + timedelta(days=365)).isoformat(),
}
response = test_client.post(
"/api/v1/api-keys",
json=payload,
headers={"X-API-Key": test_api_key.key_hash},
)
assert response.status_code == 201
data = response.json()
assert data["name"] == "Production API Key"
assert set(data["scopes"]) == {"read", "write"}
assert "key" in data # Full key returned ONLY ONCE
assert data["key"].startswith("lb_")
assert len(data["key"]) == 67 # "lb_" + 64 hex chars
assert data["key_prefix"] == data["key"][:8]
async def test_create_api_key_organization_level(self, test_client, test_api_key):
"""Test creating organization-level API key (no project_id)."""
payload = {
"name": "Org-Level Key",
"scopes": ["admin"],
}
response = test_client.post(
"/api/v1/api-keys",
json=payload,
headers={"X-API-Key": test_api_key.key_hash},
)
assert response.status_code == 201
data = response.json()
assert data["project_id"] is None # Organization-level
async def test_create_api_key_project_scoped(self, test_client, test_api_key, test_project):
"""Test creating project-scoped API key."""
payload = {
"name": "Project-Specific Key",
"project_id": str(test_project.id),
"scopes": ["read", "write"],
}
response = test_client.post(
"/api/v1/api-keys",
json=payload,
headers={"X-API-Key": test_api_key.key_hash},
)
assert response.status_code == 201
data = response.json()
assert data["project_id"] == str(test_project.id)
async def test_create_api_key_project_not_found(self, test_client, test_api_key):
"""Test creating API key for non-existent project."""
payload = {
"name": "Invalid Project Key",
"project_id": str(uuid4()),
"scopes": ["read"],
}
response = test_client.post(
"/api/v1/api-keys",
json=payload,
headers={"X-API-Key": test_api_key.key_hash},
)
assert response.status_code == 404
assert "not found" in response.json()["detail"].lower()
async def test_create_api_key_minimal_fields(self, test_client, test_api_key):
"""Test creating API key with minimal required fields."""
payload = {"name": "Minimal Key"} # Only name required
response = test_client.post(
"/api/v1/api-keys",
json=payload,
headers={"X-API-Key": test_api_key.key_hash},
)
assert response.status_code == 201
data = response.json()
assert data["name"] == "Minimal Key"
assert data["scopes"] == ["read"] # Default scope
assert data["project_id"] is None # Default org-level
class TestGetAPIKey:
"""Test GET /api/v1/api-keys/{key_id} endpoint."""
async def test_get_api_key_success(self, test_client, test_api_key, test_db):
"""Test getting single API key."""
api_key = ApiKey(
key_hash="test-hash-123",
key_prefix="lb_test1",
name="Test API Key",
scopes=["read", "write"],
is_active=True,
last_used_at=datetime.now(timezone.utc),
created_at=datetime.now(timezone.utc),
)
test_db.add(api_key)
await test_db.commit()
await test_db.refresh(api_key)
response = test_client.get(
f"/api/v1/api-keys/{api_key.id}",
headers={"X-API-Key": test_api_key.key_hash},
)
assert response.status_code == 200
data = response.json()
assert data["id"] == str(api_key.id)
assert data["name"] == "Test API Key"
assert data["key_prefix"] == "lb_test1"
assert "key" not in data # Full key NEVER returned after creation
assert set(data["scopes"]) == {"read", "write"}
async def test_get_api_key_not_found(self, test_client, test_api_key):
"""Test getting non-existent API key."""
response = test_client.get(
f"/api/v1/api-keys/{uuid4()}",
headers={"X-API-Key": test_api_key.key_hash},
)
assert response.status_code == 404
assert "not found" in response.json()["detail"].lower()
class TestUpdateAPIKey:
"""Test PATCH /api/v1/api-keys/{key_id} endpoint."""
async def test_update_api_key_success(self, test_client, test_api_key, test_db):
"""Test successful API key update."""
api_key = ApiKey(
key_hash="test-hash-update",
key_prefix="lb_updat",
name="Original Name",
scopes=["read"],
is_active=True,
created_at=datetime.now(timezone.utc),
)
test_db.add(api_key)
await test_db.commit()
await test_db.refresh(api_key)
payload = {
"name": "Updated Name",
"scopes": ["read", "write", "admin"],
}
response = test_client.patch(
f"/api/v1/api-keys/{api_key.id}",
json=payload,
headers={"X-API-Key": test_api_key.key_hash},
)
assert response.status_code == 200
data = response.json()
assert data["name"] == "Updated Name"
assert set(data["scopes"]) == {"read", "write", "admin"}
async def test_update_api_key_revoke(self, test_client, test_api_key, test_db):
"""Test revoking API key by setting is_active=false."""
api_key = ApiKey(
key_hash="test-hash-revoke",
key_prefix="lb_revok",
name="To Be Revoked",
scopes=["read"],
is_active=True,
created_at=datetime.now(timezone.utc),
)
test_db.add(api_key)
await test_db.commit()
await test_db.refresh(api_key)
payload = {"is_active": False}
response = test_client.patch(
f"/api/v1/api-keys/{api_key.id}",
json=payload,
headers={"X-API-Key": test_api_key.key_hash},
)
assert response.status_code == 200
data = response.json()
assert data["is_active"] is False
assert data["revoked_at"] is not None # Should set revoked_at timestamp
async def test_update_api_key_set_expiration(self, test_client, test_api_key, test_db):
"""Test setting expiration on API key."""
api_key = ApiKey(
key_hash="test-hash-expire",
key_prefix="lb_expir",
name="To Expire",
scopes=["read"],
is_active=True,
expires_at=None,
created_at=datetime.now(timezone.utc),
)
test_db.add(api_key)
await test_db.commit()
await test_db.refresh(api_key)
expiration = datetime.now(timezone.utc) + timedelta(days=90)
payload = {"expires_at": expiration.isoformat()}
response = test_client.patch(
f"/api/v1/api-keys/{api_key.id}",
json=payload,
headers={"X-API-Key": test_api_key.key_hash},
)
assert response.status_code == 200
data = response.json()
assert data["expires_at"] is not None
async def test_update_api_key_not_found(self, test_client, test_api_key):
"""Test updating non-existent API key."""
payload = {"name": "Updated"}
response = test_client.patch(
f"/api/v1/api-keys/{uuid4()}",
json=payload,
headers={"X-API-Key": test_api_key.key_hash},
)
assert response.status_code == 404
class TestDeleteAPIKey:
"""Test DELETE /api/v1/api-keys/{key_id} endpoint."""
async def test_delete_api_key_success(self, test_client, test_api_key, test_db):
"""Test successful API key deletion."""
api_key = ApiKey(
key_hash="test-hash-delete",
key_prefix="lb_delet",
name="To Delete",
scopes=["read"],
is_active=True,
created_at=datetime.now(timezone.utc),
)
test_db.add(api_key)
await test_db.commit()
await test_db.refresh(api_key)
key_id = api_key.id
response = test_client.delete(
f"/api/v1/api-keys/{key_id}",
headers={"X-API-Key": test_api_key.key_hash},
)
assert response.status_code == 204
# Verify deleted
get_response = test_client.get(
f"/api/v1/api-keys/{key_id}",
headers={"X-API-Key": test_api_key.key_hash},
)
assert get_response.status_code == 404
async def test_delete_api_key_not_found(self, test_client, test_api_key):
"""Test deleting non-existent API key."""
response = test_client.delete(
f"/api/v1/api-keys/{uuid4()}",
headers={"X-API-Key": test_api_key.key_hash},
)
assert response.status_code == 404
class TestAPIKeySecurityFeatures:
"""Test API key security-specific features."""
async def test_full_key_only_shown_on_creation(self, test_client, test_api_key, test_db):
"""Test that full key is only shown once during creation."""
# Create key
payload = {"name": "Security Test Key"}
create_response = test_client.post(
"/api/v1/api-keys",
json=payload,
headers={"X-API-Key": test_api_key.key_hash},
)
assert create_response.status_code == 201
create_data = create_response.json()
full_key = create_data["key"]
key_id = create_data["id"]
# Get the same key - should NOT return full key
get_response = test_client.get(
f"/api/v1/api-keys/{key_id}",
headers={"X-API-Key": test_api_key.key_hash},
)
get_data = get_response.json()
assert "key" not in get_data
assert get_data["key_prefix"] == full_key[:8]
async def test_key_prefix_for_identification(self, test_client, test_api_key, test_db):
"""Test that key_prefix is properly generated and displayed."""
payload = {"name": "Prefix Test Key"}
response = test_client.post(
"/api/v1/api-keys",
json=payload,
headers={"X-API-Key": test_api_key.key_hash},
)
assert response.status_code == 201
data = response.json()
full_key = data["key"]
key_prefix = data["key_prefix"]
# Prefix should be first 8 characters
assert key_prefix == full_key[:8]
assert len(key_prefix) == 8
assert key_prefix.startswith("lb_")
async def test_revoked_key_has_timestamp(self, test_client, test_api_key, test_db):
"""Test that revoking a key sets revoked_at timestamp."""
api_key = ApiKey(
key_hash="test-hash-timestamp",
key_prefix="lb_times",
name="Timestamp Test",
scopes=["read"],
is_active=True,
revoked_at=None,
created_at=datetime.now(timezone.utc),
)
test_db.add(api_key)
await test_db.commit()
await test_db.refresh(api_key)
# Revoke the key
response = test_client.patch(
f"/api/v1/api-keys/{api_key.id}",
json={"is_active": False},
headers={"X-API-Key": test_api_key.key_hash},
)
data = response.json()
assert data["revoked_at"] is not None
# Verify timestamp is recent
revoked_str = data["revoked_at"].replace("Z", "+00:00")
revoked_time = datetime.fromisoformat(revoked_str)
if revoked_time.tzinfo is None:
revoked_time = revoked_time.replace(tzinfo=timezone.utc)
now = datetime.now(timezone.utc)
assert (now - revoked_time).total_seconds() < 5 # Within 5 seconds
| """Integration tests for API Keys API endpoints.
Tests all 5 API Key CRUD endpoints with security focus.
"""
from datetime import datetime, timedelta, timezone
from uuid import uuid4
import pytest
from fastapi.testclient import TestClient
from lazy_bird.api.main import app
from lazy_bird.models.api_key import ApiKey
from lazy_bird.models.project import Project
class TestListAPIKeys:
"""Test GET /api/v1/api-keys endpoint."""
async def test_list_api_keys_empty(self, test_client, test_api_key):
"""Test listing API keys when none exist (except the test key)."""
response = test_client.get(
"/api/v1/api-keys",
headers={"X-API-Key": test_api_key.key_hash},
)
assert response.status_code == 200
data = response.json()
# At least the test API key should exist
assert data["total"] >= 1
async def test_list_api_keys_pagination(self, test_client, test_api_key, test_db):
"""Test API keys pagination."""
# Create 5 API keys
for i in range(5):
api_key = ApiKey(
key_hash=f"hash-{i}",
key_prefix=f"lb_test{i}",
name=f"Test Key {i}",
| [
"# yusufkaraaslan/lazy-bird:lazy_bird/models/api_key.py\nApiKey",
"# yusufkaraaslan/lazy-bird:lazy_bird/models/project.py\nProject"
] | yusufkaraaslan/lazy-bird | tests/integration/test_api_keys_api.py |
"""Integration tests for lazy-bird services"""
| """Integration tests for lazy-bird services"""
| [] | yusufkaraaslan/lazy-bird | tests/integration/__init__.py |
"""
End-to-end tests for complete agent workflow
Tests the entire workflow from issue detection to PR creation:
1. Issue detection → task queuing
2. Worktree creation
3. Task execution (mocked)
4. Test running
5. Retry on failure (with error context)
6. PR creation
7. Cleanup
This test validates the entire agent-runner.sh workflow.
"""
import pytest
import subprocess
import tempfile
import json
import time
from pathlib import Path
import shutil
import os
# Compute repo root relative to this test file
REPO_ROOT = str(Path(__file__).resolve().parent.parent.parent)
@pytest.fixture(scope="module")
def test_environment():
"""Set up complete test environment with git repo, config, logs"""
temp_base = tempfile.mkdtemp(prefix="lazy-bird-e2e-")
# Create project directory with git
project_dir = Path(temp_base) / "test-project"
project_dir.mkdir()
subprocess.run(["git", "init"], cwd=project_dir, check=True, capture_output=True)
subprocess.run(
["git", "config", "user.email", "test@example.com"],
cwd=project_dir,
check=True,
capture_output=True,
)
subprocess.run(
["git", "config", "user.name", "Test User"],
cwd=project_dir,
check=True,
capture_output=True,
)
# Create initial project structure
(project_dir / "README.md").write_text("# Test Project")
(project_dir / "src").mkdir()
(project_dir / "src" / "main.py").write_text("print('Hello')")
subprocess.run(["git", "add", "."], cwd=project_dir, check=True, capture_output=True)
subprocess.run(
["git", "commit", "-m", "Initial commit"], cwd=project_dir, check=True, capture_output=True
)
# Create logs directory
logs_dir = Path(temp_base) / "logs"
logs_dir.mkdir()
# Create queue directory
queue_dir = Path(temp_base) / "queue"
queue_dir.mkdir()
env = {
"project_dir": project_dir,
"logs_dir": logs_dir,
"queue_dir": queue_dir,
"temp_base": Path(temp_base),
}
yield env
# Cleanup
shutil.rmtree(temp_base, ignore_errors=True)
class TestCompleteWorkflow:
"""Test complete workflow from start to finish"""
def test_worktree_lifecycle(self, test_environment):
"""Test complete worktree lifecycle: create → work → cleanup"""
project_dir = test_environment["project_dir"]
worktree_path = test_environment["temp_base"] / "agent-test-123"
branch_name = "feature-test-123"
# Step 1: Create worktree directly using git commands
create_result = subprocess.run(
["git", "worktree", "add", "-b", branch_name, str(worktree_path)],
cwd=project_dir,
capture_output=True,
text=True,
)
assert create_result.returncode == 0, f"Create failed: {create_result.stderr}"
assert worktree_path.exists(), "Worktree should exist"
# Step 2: Make changes in worktree (simulating agent work)
test_file = worktree_path / "src" / "new_feature.py"
test_file.write_text("def new_feature(): return True")
subprocess.run(["git", "add", "."], cwd=worktree_path, check=True, capture_output=True)
subprocess.run(
["git", "commit", "-m", "Add new feature"],
cwd=worktree_path,
check=True,
capture_output=True,
)
# Verify commit exists in branch
log_result = subprocess.run(
["git", "log", "--oneline", "-1"], cwd=worktree_path, capture_output=True, text=True
)
assert "Add new feature" in log_result.stdout
# Step 3: Cleanup worktree directly using git commands
cleanup_result = subprocess.run(
["git", "worktree", "remove", str(worktree_path), "--force"],
cwd=project_dir,
capture_output=True,
text=True,
)
assert cleanup_result.returncode == 0, f"Cleanup failed: {cleanup_result.stderr}"
subprocess.run(["git", "branch", "-D", branch_name], cwd=project_dir, capture_output=True)
assert not worktree_path.exists(), "Worktree should be removed"
def test_error_parsing_and_retry_context(self, test_environment):
"""Test error parsing creates proper context for retry"""
logs_dir = test_environment["logs_dir"]
# Create test log with Godot errors
test_log = logs_dir / "test-output.log"
test_log.write_text("""
Tests: 15 | Passed: 12 | Failed: 3 | Errors: 0
FAILED: test_player_movement
Expected velocity: Vector2(100, 0)
Got: Vector2(0, 0)
at res://test/test_player.gd:56
FAILED: test_enemy_ai
Enemy did not pursue player
at res://test/test_enemy.gd:89
FAILED: test_collision_detection
Collision not detected
at res://systems/physics/test_collision.gd:42
""")
# Step 1: Parse errors using Python parser
parser_script = f"{REPO_ROOT}/scripts/parse_test_errors.py"
parse_result = subprocess.run(
["python3", parser_script, str(test_log), "godot"], capture_output=True, text=True
)
assert parse_result.returncode == 0
error_context = parse_result.stdout
# Step 2: Verify error context has critical information
assert "3 Error(s) Found" in error_context or "Failed: 3" in error_context
assert "test_player_movement" in error_context
assert "test_player.gd:56" in error_context
assert "test_enemy.gd:89" in error_context
# Step 3: Verify JSON output has structured data
json_result = subprocess.run(
["python3", parser_script, str(test_log), "godot", "--json"],
capture_output=True,
text=True,
)
assert json_result.returncode == 0
error_data = json.loads(json_result.stdout)
assert error_data["error_count"] == 3
assert error_data["stats"]["failed"] == 3
assert len(error_data["errors"]) == 3
# Verify each error has required fields
for error in error_data["errors"]:
assert "test_name" in error
assert "file" in error
assert "line" in error
assert "error" in error
# This error_context would be passed to run_claude on retry
# Verify it's substantial enough to help Claude
assert len(error_context) > 100, "Error context should be detailed"
def test_multi_attempt_workflow(self, test_environment):
"""Test workflow with multiple retry attempts"""
# Simulate retry workflow
max_retries = 3
total_attempts = max_retries + 1
assert total_attempts == 4, "Should have 4 total attempts (1 initial + 3 retries)"
# Test backoff calculation
backoffs = []
for attempt in range(1, max_retries + 1):
backoff = 30 * attempt
backoffs.append(backoff)
assert backoffs == [30, 60, 90], "Backoff should increase: 30s, 60s, 90s"
def test_bash_function_integration(self, test_environment):
"""Test that all critical bash functions exist and can be called"""
script_path = f"{REPO_ROOT}/scripts/agent-runner.sh"
critical_functions = [
"create_worktree",
"cleanup_worktree",
"parse_test_errors",
"run_tests",
"commit_changes",
"push_branch",
"run_claude",
]
for func_name in critical_functions:
# Verify function exists
result = subprocess.run(
["grep", "-c", f"^{func_name}()", script_path], capture_output=True, text=True
)
assert result.returncode == 0, f"Function {func_name} should exist"
count = int(result.stdout.strip())
assert count >= 1, f"Function {func_name} should be defined at least once"
def test_error_context_appended_to_claude_prompt(self):
"""Test that error context is properly appended to Claude prompt in run_claude"""
script_path = f"{REPO_ROOT}/scripts/agent-runner.sh"
# Extract run_claude function
result = subprocess.run(
["grep", "-A", "50", "^run_claude()", script_path], capture_output=True, text=True
)
assert result.returncode == 0
function_code = result.stdout
# Verify error context handling
assert (
'local error_context="${1:-}"' in function_code
or 'error_context="${1:-}"' in function_code
)
assert (
'if [ -n "$error_context" ]' in function_code
or 'if [[ -n "$error_context" ]]' in function_code
)
assert "PREVIOUS ATTEMPT FAILED" in function_code
assert "$error_context" in function_code
class TestWorkflowFailureHandling:
"""Test workflow handles failures gracefully"""
def test_cleanup_on_exit_trap(self):
"""Test that cleanup_worktree is registered as EXIT trap"""
script_path = f"{REPO_ROOT}/scripts/agent-runner.sh"
result = subprocess.run(
["grep", "trap cleanup_worktree EXIT", script_path], capture_output=True, text=True
)
assert result.returncode == 0, "EXIT trap should be registered"
assert "trap cleanup_worktree EXIT" in result.stdout
def test_error_handling_set_in_script(self):
"""Test that script has proper error handling enabled"""
script_path = f"{REPO_ROOT}/scripts/agent-runner.sh"
with open(script_path) as f:
content = f.read()
# Should have error handling
assert "set -" in content, "Script should have set - flags"
# Should have ERR trap management for retry loop
assert "trap - ERR" in content or "trap ERR" in content
class TestWorkflowMetrics:
"""Test workflow tracking and metrics"""
def test_all_workflow_steps_present(self):
"""Test that agent-runner.sh contains all 11 workflow steps"""
script_path = f"{REPO_ROOT}/scripts/agent-runner.sh"
with open(script_path) as f:
content = f.read()
# Key workflow steps that should be present
workflow_steps = [
"create_worktree", # Step 2
"run_claude", # Step 4 & 8
"run_tests", # Step 5 & 9
"parse_test_errors", # Step 7
"commit_changes", # Step 10
"push_branch", # Step 11
"cleanup_worktree", # Exit trap
]
for step in workflow_steps:
assert (
f"{step}()" in content or f"def {step}" in content
), f"Workflow step '{step}' should be present"
class TestFrameworkSupport:
"""Test multi-framework error parsing support"""
def test_godot_framework_supported(self, test_environment):
"""Test Godot error parsing"""
logs_dir = test_environment["logs_dir"]
test_log = logs_dir / "godot-test.log"
test_log.write_text("Tests: 10 | Passed: 9 | Failed: 1 | Errors: 0")
parser_script = f"{REPO_ROOT}/scripts/parse_test_errors.py"
result = subprocess.run(
["python3", parser_script, str(test_log), "godot", "--json"],
capture_output=True,
text=True,
)
assert result.returncode == 0
data = json.loads(result.stdout)
assert data["framework"] == "godot"
def test_python_framework_supported(self, test_environment):
"""Test Python/pytest error parsing"""
logs_dir = test_environment["logs_dir"]
test_log = logs_dir / "pytest.log"
test_log.write_text("FAILED test_example.py::test_one - AssertionError")
parser_script = f"{REPO_ROOT}/scripts/parse_test_errors.py"
result = subprocess.run(
["python3", parser_script, str(test_log), "python", "--json"],
capture_output=True,
text=True,
)
assert result.returncode == 0
data = json.loads(result.stdout)
assert data["framework"] == "python"
def test_rust_framework_supported(self, test_environment):
"""Test Rust error parsing"""
logs_dir = test_environment["logs_dir"]
test_log = logs_dir / "cargo-test.log"
test_log.write_text("test result: FAILED. 5 passed; 1 failed; 0 ignored")
parser_script = f"{REPO_ROOT}/scripts/parse_test_errors.py"
result = subprocess.run(
["python3", parser_script, str(test_log), "rust", "--json"],
capture_output=True,
text=True,
)
assert result.returncode == 0
data = json.loads(result.stdout)
assert data["framework"] == "rust"
if __name__ == "__main__":
pytest.main([__file__, "-v", "-s"])
| """
End-to-end tests for complete agent workflow
Tests the entire workflow from issue detection to PR creation:
1. Issue detection → task queuing
2. Worktree creation
3. Task execution (mocked)
4. Test running
5. Retry on failure (with error context)
6. PR creation
7. Cleanup
This test validates the entire agent-runner.sh workflow.
"""
import pytest
import subprocess
import tempfile
import json
import time
from pathlib import Path
import shutil
import os
# Compute repo root relative to this test file
REPO_ROOT = str(Path(__file__).resolve().parent.parent.parent)
@pytest.fixture(scope="module")
def test_environment():
"""Set up complete test environment with git repo, config, logs"""
temp_base = tempfile.mkdtemp(prefix="lazy-bird-e2e-")
# Create project directory with git
project_dir = Path(temp_base) / "test-project"
project_dir.mkdir()
subprocess.run(["git", "init"], cwd=project_dir, check=True, capture_output=True)
subprocess.run(
["git", "config", "user.email", "test@example.com"],
cwd=project_dir,
check=True,
capture_output=True,
)
subprocess.run(
["git", "config", "user.name", "Test User"],
cwd=project_dir,
check=True,
capture_output=True,
)
# Create initial project structure
(project_dir / "README.md").write_text("# Test Project")
(project_dir / "src").mkdir()
(project_ | [] | yusufkaraaslan/lazy-bird | tests/e2e/test_complete_workflow.py |
"""
Pytest configuration and shared fixtures for lazy-bird tests
"""
import pytest
import tempfile
import shutil
from pathlib import Path
from typing import Dict, Any
from unittest.mock import Mock, MagicMock
@pytest.fixture
def temp_dir():
"""Create a temporary directory for test files"""
temp_path = Path(tempfile.mkdtemp())
yield temp_path
# Cleanup
if temp_path.exists():
shutil.rmtree(temp_path)
@pytest.fixture
def mock_config():
"""Mock lazy-bird configuration"""
return {
"project_type": "python",
"project_path": "/tmp/test-project",
"git_platform": "github",
"repository": "user/repo",
"test_command": "pytest",
"build_command": None,
"lint_command": "flake8",
"docker": {"enabled": True, "memory_limit": "2G"},
"godot_server": {"enabled": True, "port": 5000, "host": "127.0.0.1"},
}
@pytest.fixture
def mock_project_config():
"""Mock project configuration for multi-project tests"""
return {
"id": "test-project",
"name": "Test Project",
"type": "python",
"path": "/tmp/test-project",
"repository": "user/test-project",
"git_platform": "github",
"test_command": "pytest",
"build_command": "python -m build",
"lint_command": "flake8",
"enabled": True,
}
@pytest.fixture
def mock_multi_project_config(mock_project_config):
"""Mock configuration with multiple projects"""
return {
"projects": [
mock_project_config,
{
"id": "godot-game",
"name": "Godot Game",
"type": "godot",
"path": "/tmp/godot-game",
"repository": "user/godot-game",
"git_platform": "github",
"test_command": "godot --headless -s addons/gdUnit4/bin/GdUnitCmdTool.gd",
"enabled": True,
},
{
"id": "django-backend",
"name": "Django Backend",
"type": "django",
"path": "/tmp/django-backend",
"repository": "user/django-backend",
"git_platform": "gitlab",
"test_command": "pytest",
"lint_command": "pylint",
"enabled": False,
},
],
"global": {"poll_interval": 60, "max_retries": 3, "timeout": 300},
}
@pytest.fixture
def mock_github_issue():
"""Mock GitHub issue response"""
return {
"number": 42,
"title": "[Task]: Add player health system",
"body": "## Task Description\nAdd health tracking to player\n\n## Detailed Steps\n1. Add health variable\n2. Add take_damage method\n\n## Acceptance Criteria\n- [ ] Health starts at 100\n- [ ] Damage reduces health\n\n## Complexity\nmedium",
"state": "open",
"labels": [{"name": "ready"}],
"created_at": "2025-11-29T10:00:00Z",
"updated_at": "2025-11-29T10:00:00Z",
"html_url": "https://github.com/user/repo/issues/42",
}
@pytest.fixture
def mock_gitlab_issue():
"""Mock GitLab issue response"""
return {
"iid": 42,
"title": "[Task]: Add player health system",
"description": "## Task Description\nAdd health tracking to player",
"state": "opened",
"labels": ["ready"],
"created_at": "2025-11-29T10:00:00Z",
"updated_at": "2025-11-29T10:00:00Z",
"web_url": "https://gitlab.com/user/repo/-/issues/42",
}
@pytest.fixture
def mock_test_job():
"""Mock Godot server test job"""
return {
"id": "job-12345",
"project_path": "/tmp/test-project",
"test_suite": "res://tests/test_player.gd",
"status": "queued",
"created_at": "2025-11-29T10:00:00",
"timeout": 300,
}
@pytest.fixture
def mock_flask_app():
"""Mock Flask app for testing"""
from flask import Flask
app = Flask(__name__)
app.config["TESTING"] = True
return app
@pytest.fixture
def mock_requests(monkeypatch):
"""Mock requests library"""
mock_response = Mock()
mock_response.status_code = 200
mock_response.json.return_value = {"status": "ok"}
mock_get = Mock(return_value=mock_response)
mock_post = Mock(return_value=mock_response)
monkeypatch.setattr("requests.get", mock_get)
monkeypatch.setattr("requests.post", mock_post)
return {"get": mock_get, "post": mock_post, "response": mock_response}
@pytest.fixture
def mock_subprocess(monkeypatch):
"""Mock subprocess calls"""
mock_result = Mock()
mock_result.returncode = 0
mock_result.stdout = "Success"
mock_result.stderr = ""
mock_run = Mock(return_value=mock_result)
mock_call = Mock(return_value=0)
monkeypatch.setattr("subprocess.run", mock_run)
monkeypatch.setattr("subprocess.call", mock_call)
return {"run": mock_run, "call": mock_call, "result": mock_result}
@pytest.fixture
def secrets_dir(temp_dir):
"""Create a mock secrets directory with API token"""
secrets_path = temp_dir / ".config" / "lazy_birtd" / "secrets"
secrets_path.mkdir(parents=True, exist_ok=True)
# Create mock API token
token_file = secrets_path / "api_token"
token_file.write_text("ghp_mock_token_12345")
token_file.chmod(0o600)
return secrets_path
@pytest.fixture
def mock_package_root(temp_dir):
"""Mock PACKAGE_ROOT directory structure"""
# Create directory structure
(temp_dir / "scripts").mkdir(parents=True)
(temp_dir / "config").mkdir(parents=True)
(temp_dir / "web").mkdir(parents=True)
# Create mock wizard script
wizard = temp_dir / "wizard.sh"
wizard.write_text('#!/bin/bash\necho "Mock wizard"')
wizard.chmod(0o755)
# Create mock scripts
(temp_dir / "scripts" / "godot-server.py").write_text(
'#!/usr/bin/env python3\nprint("Mock godot server")'
)
(temp_dir / "scripts" / "issue-watcher.py").write_text(
'#!/usr/bin/env python3\nprint("Mock issue watcher")'
)
(temp_dir / "scripts" / "project-manager.py").write_text(
'#!/usr/bin/env python3\nprint("Mock project manager")'
)
return temp_dir
# ============================================================================
# FastAPI / API Testing Fixtures
# ============================================================================
def _create_sqlite_compatible_tables(connection):
    """Create tables in SQLite by stripping PostgreSQL-specific features.

    The models use PG-specific types (ARRAY, JSONB, UUID, TSVECTOR, etc.)
    and PG-specific server_defaults (gen_random_uuid). This function builds
    a clean MetaData with SQLite-compatible types.

    Args:
        connection: a synchronous SQLAlchemy connection (this is intended
            to be invoked via ``conn.run_sync``).
    """
    from sqlalchemy import MetaData, Table, Column, String, JSON, Text
    import sqlalchemy.dialects.postgresql as pg_types
    from lazy_bird.core.database import Base

    # Import all models so they register with Base.metadata
    import lazy_bird.models  # noqa: F401

    # Map PG-specific types to SQLite-compatible equivalents.
    # Each factory receives the original column type; most ignore it.
    PG_TYPE_MAP = {
        pg_types.ARRAY: lambda _: JSON(),
        pg_types.JSONB: lambda _: JSON(),
        pg_types.JSON: lambda _: JSON(),
        pg_types.UUID: lambda _: String(36),
        pg_types.TSVECTOR: lambda _: Text(),
        pg_types.INET: lambda _: String(45),
        pg_types.CIDR: lambda _: String(45),
        pg_types.MACADDR: lambda _: String(17),
        pg_types.ENUM: lambda t: String(255),
        pg_types.INTERVAL: lambda _: String(50),
    }
    # Build a fresh MetaData mirroring Base.metadata, column by column.
    meta = MetaData()
    for table in Base.metadata.sorted_tables:
        columns = []
        for col in table.columns:
            col_type = col.type
            # Replace any PG-specific type with its SQLite stand-in.
            for pg_cls, factory in PG_TYPE_MAP.items():
                if isinstance(col_type, pg_cls):
                    col_type = factory(col_type)
                    break
            kwargs = {
                "primary_key": col.primary_key,
                "nullable": col.nullable,
            }
            # Skip PG-specific server_defaults (gen_random_uuid, array literals)
            # that SQLite cannot evaluate; keep all others.
            if col.server_default is not None:
                sd_text = ""
                if hasattr(col.server_default, "arg"):
                    sd_text = str(col.server_default.arg)
                if not any(s in sd_text for s in ["gen_random_uuid", "'{", "ARRAY"]):
                    kwargs["server_default"] = col.server_default
            columns.append(Column(col.name, col_type, **kwargs))
        # Create table without PG-specific constraints (indexes, FKs and
        # check constraints from the original metadata are intentionally
        # not copied).
        Table(table.name, meta, *columns)
    meta.create_all(connection)
@pytest.fixture
async def test_db():
    """Create test database session.

    Creates an in-memory SQLite database for testing.
    PostgreSQL-specific types (ARRAY, gen_random_uuid) are replaced with
    SQLite-compatible equivalents.

    Yields:
        An AsyncSession bound to a fresh in-memory database; the engine
        is disposed after the test finishes.
    """
    from sqlalchemy.ext.asyncio import create_async_engine, AsyncSession
    from sqlalchemy.orm import sessionmaker
    import json
    from sqlalchemy import event as sa_event  # NOTE(review): imported but unused here

    # Create in-memory async SQLite database
    engine = create_async_engine(
        "sqlite+aiosqlite:///:memory:",
        echo=False,
        json_serializer=json.dumps,
        json_deserializer=json.loads,
    )
    # Patch ARRAY and JSONB columns in ORM models to use JSON type on SQLite.
    # This ensures proper serialization/deserialization of list/dict values.
    # NOTE(review): this mutates the shared Base.metadata in place and is
    # never undone — presumably harmless because the suite never talks to
    # real PostgreSQL; confirm before adding PG-backed tests.
    from sqlalchemy import JSON as SA_JSON
    from sqlalchemy.dialects.postgresql import ARRAY, JSONB
    import lazy_bird.models  # noqa: F401
    from lazy_bird.core.database import Base

    for table in Base.metadata.sorted_tables:
        for col in table.columns:
            if isinstance(col.type, (ARRAY, JSONB)):
                col.type = SA_JSON()
    # Create tables with SQLite-compatible DDL
    async with engine.begin() as conn:
        await conn.run_sync(_create_sqlite_compatible_tables)
    # Create session factory
    async_session_factory = sessionmaker(engine, class_=AsyncSession, expire_on_commit=False)
    # Create session
    async with async_session_factory() as session:
        yield session
    # Cleanup
    await engine.dispose()
@pytest.fixture
def test_client(test_db):
    """FastAPI TestClient whose DB dependency yields the test session."""
    from fastapi.testclient import TestClient
    from lazy_bird.api.main import app
    from lazy_bird.api.dependencies import get_async_database

    async def _use_test_session():
        yield test_db

    # Route all database access through the in-memory test session.
    app.dependency_overrides[get_async_database] = _use_test_session
    yield TestClient(app)
    # Teardown: drop the override so other tests see a clean app.
    app.dependency_overrides.clear()
@pytest.fixture
async def test_api_key(test_db):
    """Create test API key with admin scope.

    Returns a detached ApiKey model whose ``key_hash`` attribute has been
    swapped to the raw (unhashed) key, so tests can pass
    ``test_api_key.key_hash`` directly as the X-API-Key header value.
    (The database row itself stores the proper hash.)
    """
    from lazy_bird.models.api_key import ApiKey
    from lazy_bird.core.security import generate_api_key, hash_api_key, get_api_key_prefix
    from datetime import datetime, timezone

    raw_key = generate_api_key()
    api_key = ApiKey(
        name="Test API Key",
        key_hash=hash_api_key(raw_key),
        key_prefix=get_api_key_prefix(raw_key),
        scopes=["admin"],
        is_active=True,
        created_at=datetime.now(timezone.utc),
    )
    test_db.add(api_key)
    await test_db.commit()
    await test_db.refresh(api_key)
    # Expunge from session so we can modify attributes without dirtying it.
    # Tests use test_api_key.key_hash as the X-API-Key header value,
    # so we swap key_hash to the raw key for convenience.
    from sqlalchemy.orm import make_transient

    test_db.expunge(api_key)
    make_transient(api_key)
    api_key.key_hash = raw_key
    return api_key
@pytest.fixture
async def test_project(test_db):
    """Persist and return a fully populated Project row."""
    from lazy_bird.models.project import Project
    from datetime import datetime, timezone
    from decimal import Decimal

    # Representative python project with automation and cost limits set.
    project = Project(
        name="Test Project",
        slug="test-project",
        repo_url="https://github.com/user/test-project",
        default_branch="main",
        project_type="python",
        automation_enabled=True,
        test_command="pytest",
        build_command="python -m build",
        lint_command="flake8",
        max_concurrent_tasks=3,
        task_timeout_seconds=1800,
        max_cost_per_task_usd=Decimal("5.00"),
        daily_cost_limit_usd=Decimal("50.00"),
        created_at=datetime.now(timezone.utc),
        updated_at=datetime.now(timezone.utc),
    )
    test_db.add(project)
    await test_db.commit()
    # Refresh so DB-generated fields (id, defaults) are populated.
    await test_db.refresh(project)
    return project
| """
Pytest configuration and shared fixtures for lazy-bird tests
"""
import pytest
import tempfile
import shutil
from pathlib import Path
from typing import Dict, Any
from unittest.mock import Mock, MagicMock
@pytest.fixture
def temp_dir():
    """Yield a fresh temporary directory; delete it on teardown."""
    path = Path(tempfile.mkdtemp())
    yield path
    # Teardown: remove the directory tree if the test left it behind.
    if path.exists():
        shutil.rmtree(path)
@pytest.fixture
def mock_config():
    """Return a canned single-project lazy-bird configuration dict."""
    config = {
        "project_type": "python",
        "project_path": "/tmp/test-project",
        "git_platform": "github",
        "repository": "user/repo",
        "test_command": "pytest",
        "build_command": None,
        "lint_command": "flake8",
    }
    # Nested service settings.
    config["docker"] = {"enabled": True, "memory_limit": "2G"}
    config["godot_server"] = {"enabled": True, "port": 5000, "host": "127.0.0.1"}
    return config
@pytest.fixture
def mock_project_config():
"""Mock project configuration for multi-project tests"""
return {
"id": "test-project",
"name": "Test Project",
"type": "python",
"path": "/tmp/test-project",
"repository": "user/test-project",
"git_platform": "github",
"test_ | [] | yusufkaraaslan/lazy-bird | tests/conftest.py |
"""
Lazy_Bird Test Suite
Comprehensive unit and integration tests for the lazy-bird package.
"""
| """
Lazy_Bird Test Suite
Comprehensive unit and integration tests for the lazy-bird package.
"""
| [] | yusufkaraaslan/lazy-bird | tests/__init__.py |
#!/usr/bin/env python3
"""
Lazy_Bird Queue Processor Service
Monitors task queue and spawns agent-runner.sh processes for queued tasks
Phase 1.1: Multi-project support with concurrent agent management
"""
import time
import sys
import json
import logging
import subprocess
import signal
from pathlib import Path
from typing import Dict, List, Optional, Set
from datetime import datetime
import yaml
# Configure logging
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
handlers=[logging.StreamHandler(sys.stdout)],
)
logger = logging.getLogger("queue-processor")
class AgentProcess:
    """A handle for one spawned agent-runner subprocess."""

    def __init__(
        self,
        task_id: str,
        project_id: str,
        issue_number: int,
        process: subprocess.Popen,
        task_file: Path,
    ):
        # Identity of the task this agent is working on.
        self.task_id = task_id
        self.project_id = project_id
        self.issue_number = issue_number
        # Underlying OS process and the task file it was spawned from.
        self.process = process
        self.task_file = task_file
        # Wall-clock start time, for monitoring how long the agent runs.
        self.started_at = datetime.now()

    def is_running(self) -> bool:
        """Return True while the subprocess has not yet exited."""
        # Popen.poll() yields None while the child is still alive.
        return self.process.poll() is None

    def get_exit_code(self) -> Optional[int]:
        """Return the exit code, or None if the process is still running."""
        return self.process.poll()
class QueueProcessor:
    """Monitors task queue and spawns agent runners.

    Polls the queue directory for task-*.json files whose status is
    "queued", launches scripts/agent-runner.sh for each (up to the
    configured concurrency limit), and records lifecycle transitions
    back into the task JSON files.
    """

    def __init__(self):
        """Initialize queue processor.

        Raises:
            FileNotFoundError: if the YAML configuration file is missing.
        """
        self.config_file = Path.home() / ".config" / "lazy_birtd" / "config.yml"
        self.queue_dir = Path.home() / ".config" / "lazy_birtd" / "queue"
        self.log_dir = Path.home() / ".config" / "lazy_birtd" / "logs"
        self.scripts_dir = Path(__file__).parent
        # Load configuration
        self.config = self.load_config()
        self.max_concurrent_agents = self.config.get("max_concurrent_agents", 1)
        self.poll_interval = self.config.get("poll_interval_seconds", 30)
        # Active agent processes, keyed by task id (the task file stem)
        self.active_agents: Dict[str, AgentProcess] = {}
        # Shutdown flag, set by signal handlers and checked by run()
        self.shutdown_requested = False
        # Create directories if needed
        self.queue_dir.mkdir(parents=True, exist_ok=True)
        self.log_dir.mkdir(parents=True, exist_ok=True)
        logger.info("Queue Processor initialized")
        logger.info(f" Queue directory: {self.queue_dir}")
        logger.info(f" Max concurrent agents: {self.max_concurrent_agents}")
        logger.info(f" Poll interval: {self.poll_interval}s")

    def load_config(self) -> Dict:
        """Load configuration from YAML file.

        Returns:
            The parsed configuration dictionary.

        Raises:
            FileNotFoundError: if the config file does not exist.
            Exception: parse/IO errors from yaml.safe_load are logged
                and re-raised.
        """
        if not self.config_file.exists():
            logger.error(f"Configuration file not found: {self.config_file}")
            raise FileNotFoundError(f"Config not found: {self.config_file}")
        try:
            with open(self.config_file, "r") as f:
                config = yaml.safe_load(f)
            return config
        except Exception as e:
            logger.error(f"Failed to load configuration: {e}")
            raise

    def get_task_status(self, task_file: Path) -> Optional[str]:
        """Read task status from a JSON task file.

        Returns:
            The task's status string, "queued" when no status is present,
            or None when the file cannot be read/parsed.
        """
        try:
            with open(task_file, "r") as f:
                task_data = json.load(f)
            # Default to "queued" for tasks without status (newly created by issue-watcher)
            return task_data.get("status", "queued")
        except Exception as e:
            logger.error(f"Failed to read task file {task_file}: {e}")
            return None

    def update_task_status(self, task_file: Path, status: str, agent_pid: Optional[int] = None):
        """Update task status (and bookkeeping timestamps) in the JSON file.

        Failures are logged but never raised, so a corrupt task file cannot
        take down the service loop.
        """
        try:
            with open(task_file, "r") as f:
                task_data = json.load(f)
            task_data["status"] = status
            task_data["_last_updated"] = datetime.now().isoformat()
            if agent_pid:
                task_data["_agent_pid"] = agent_pid
            # Record phase-specific timestamps for observability.
            if status == "processing":
                task_data["_processing_started_at"] = datetime.now().isoformat()
            elif status in ["completed", "failed"]:
                task_data["completed_at"] = datetime.now().isoformat()
            with open(task_file, "w") as f:
                json.dump(task_data, f, indent=2)
            logger.info(f"Updated task {task_file.stem} status: {status}")
        except Exception as e:
            logger.error(f"Failed to update task file {task_file}: {e}")

    def find_queued_tasks(self) -> List[Path]:
        """Find all queued task files, oldest first (FIFO by mtime)."""
        queued_tasks = []
        for task_file in self.queue_dir.glob("task-*.json"):
            status = self.get_task_status(task_file)
            if status == "queued":
                queued_tasks.append(task_file)
        return sorted(queued_tasks, key=lambda p: p.stat().st_mtime)

    def spawn_agent(self, task_file: Path) -> Optional[AgentProcess]:
        """Spawn agent-runner.sh for a task.

        Returns:
            An AgentProcess handle on success; None on failure, in which
            case the task is marked "failed".
        """
        try:
            # Parse task file to get metadata
            with open(task_file, "r") as f:
                task_data = json.load(f)
            task_id = task_data.get("task_id", task_file.stem)
            project_id = task_data.get("project_id", "unknown")
            issue_number = task_data.get("issue_id", 0)
            # Path to agent-runner.sh (expected next to this script)
            agent_runner = self.scripts_dir / "agent-runner.sh"
            if not agent_runner.exists():
                logger.error(f"agent-runner.sh not found at {agent_runner}")
                return None
            # Each agent writes all of its output to its own log file
            log_file = self.log_dir / f"{task_id}.log"
            logger.info(
                f"Spawning agent for task {task_id} (Issue #{issue_number}, Project: {project_id})"
            )
            # Spawn agent-runner.sh as subprocess
            with open(log_file, "w") as log:
                process = subprocess.Popen(
                    [str(agent_runner), str(task_file)],
                    stdout=log,
                    stderr=subprocess.STDOUT,
                    start_new_session=True,  # Detach from parent
                )
            # Update task status to processing
            self.update_task_status(task_file, "processing", agent_pid=process.pid)
            # Create AgentProcess object
            agent = AgentProcess(task_id, project_id, issue_number, process, task_file)
            logger.info(f"Agent spawned: PID {process.pid}, Log: {log_file}")
            return agent
        except Exception as e:
            logger.error(f"Failed to spawn agent for {task_file}: {e}")
            self.update_task_status(task_file, "failed")
            return None

    def cleanup_finished_agents(self):
        """Reap finished agents and record their final task status."""
        finished_task_ids = []
        for task_id, agent in self.active_agents.items():
            if not agent.is_running():
                exit_code = agent.get_exit_code()
                if exit_code == 0:
                    logger.info(
                        f"Agent completed successfully: {task_id} (PID {agent.process.pid})"
                    )
                    self.update_task_status(agent.task_file, "completed")
                else:
                    logger.error(
                        f"Agent failed: {task_id} (PID {agent.process.pid}, Exit code: {exit_code})"
                    )
                    self.update_task_status(agent.task_file, "failed")
                finished_task_ids.append(task_id)
        # Remove finished agents after iterating, to avoid mutating the
        # dict while it is being traversed.
        for task_id in finished_task_ids:
            del self.active_agents[task_id]

    def available_agent_slots(self) -> int:
        """Calculate how many more agents can be spawned."""
        return self.max_concurrent_agents - len(self.active_agents)

    def process_queue(self):
        """Main queue processing logic - one iteration."""
        # Clean up finished agents first so their slots free up
        self.cleanup_finished_agents()
        # Check if we can spawn more agents
        available_slots = self.available_agent_slots()
        if available_slots <= 0:
            return
        # Find queued tasks
        queued_tasks = self.find_queued_tasks()
        if not queued_tasks:
            return
        logger.info(
            f"Found {len(queued_tasks)} queued task(s), {available_slots} agent slot(s) available"
        )
        # Spawn agents for queued tasks, oldest first
        spawned = 0
        for task_file in queued_tasks:
            if spawned >= available_slots:
                break
            # Check if already processing (race condition protection)
            task_id = task_file.stem
            if task_id in self.active_agents:
                continue
            agent = self.spawn_agent(task_file)
            if agent:
                self.active_agents[task_id] = agent
                spawned += 1

    def run(self):
        """Main service loop: poll, process, sleep, until shutdown."""
        logger.info("🚀 Queue Processor started")
        logger.info(f" Max concurrent agents: {self.max_concurrent_agents}")
        logger.info(f" Polling every {self.poll_interval} seconds")
        logger.info(" Press Ctrl+C to stop")
        logger.info("")
        # Setup signal handlers so SIGINT/SIGTERM trigger a clean shutdown
        signal.signal(signal.SIGINT, self.handle_shutdown)
        signal.signal(signal.SIGTERM, self.handle_shutdown)
        while not self.shutdown_requested:
            try:
                self.process_queue()
                time.sleep(self.poll_interval)
            except KeyboardInterrupt:
                break
            except Exception as e:
                # Keep the service alive on transient errors; retry next poll
                logger.error(f"Error in main loop: {e}")
                time.sleep(self.poll_interval)
        logger.info("Shutting down...")
        self.shutdown()

    def handle_shutdown(self, signum, frame):
        """Handle shutdown signals by flagging the main loop to stop."""
        logger.info(f"Received signal {signum}, initiating shutdown...")
        self.shutdown_requested = True

    def shutdown(self):
        """Graceful shutdown - wait for agents (max 30s) or kill them."""
        if not self.active_agents:
            logger.info("No active agents, exiting")
            return
        logger.info(f"Waiting for {len(self.active_agents)} active agent(s) to finish...")
        logger.info("Press Ctrl+C again to force kill")
        try:
            # Wait up to 30 seconds for agents to finish
            wait_time = 0
            while self.active_agents and wait_time < 30:
                self.cleanup_finished_agents()
                time.sleep(1)
                wait_time += 1
            # Force kill remaining agents
            if self.active_agents:
                logger.warning(f"Force killing {len(self.active_agents)} remaining agent(s)")
                for task_id, agent in self.active_agents.items():
                    try:
                        agent.process.terminate()
                        logger.info(f"Terminated agent: {task_id} (PID {agent.process.pid})")
                    except Exception as e:
                        logger.error(f"Failed to terminate agent {task_id}: {e}")
        except KeyboardInterrupt:
            logger.warning("Force shutdown requested")
            for task_id, agent in self.active_agents.items():
                try:
                    agent.process.kill()
                except Exception:
                    # Fixed: was a bare `except:` that would also swallow
                    # SystemExit/KeyboardInterrupt; best-effort kill only
                    # needs to ignore ordinary errors.
                    pass
        logger.info("Queue Processor stopped")
def main():
    """Entry point: build the processor and run its polling loop."""
    try:
        QueueProcessor().run()
    except Exception as e:
        # Any startup or runtime failure is fatal for the service.
        logger.error(f"Fatal error: {e}")
        sys.exit(1)
if __name__ == "__main__":
main()
| #!/usr/bin/env python3
"""
Lazy_Bird Queue Processor Service
Monitors task queue and spawns agent-runner.sh processes for queued tasks
Phase 1.1: Multi-project support with concurrent agent management
"""
import time
import sys
import json
import logging
import subprocess
import signal
from pathlib import Path
from typing import Dict, List, Optional, Set
from datetime import datetime
import yaml
# Configure logging
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
handlers=[logging.StreamHandler(sys.stdout)],
)
logger = logging.getLogger("queue-processor")
class AgentProcess:
    """Represents a running agent process"""

    def __init__(
        self,
        task_id: str,
        project_id: str,
        issue_number: int,
        process: subprocess.Popen,
        task_file: Path,
    ):
        # Task identity and the issue it originated from
        self.task_id = task_id
        self.project_id = project_id
        self.issue_number = issue_number
        # Spawned agent-runner subprocess and its task JSON file
        self.process = process
        self.task_file = task_file
        # Wall-clock start time, useful for monitoring/debugging
        self.started_at = datetime.now()

    def is_running(self) -> bool:
        """Check if process is still running"""
        # Popen.poll() is None while the child has not exited
        return self.process.poll() is None

    def get_exit_code(self) -> Optional[int]:
        """Get process exit code if finished"""
        return self.process.poll()
class QueueProcessor:
| [] | yusufkaraaslan/lazy-bird | scripts/queue-processor.py |
#!/usr/bin/env python3
"""
Lazy_Bird Project Manager CLI
Manage multiple projects in Phase 1.1 multi-project configuration
"""
import sys
import argparse
from pathlib import Path
from typing import Dict, List, Optional
import json
def load_config(config_path: Path) -> Dict:
    """Read the YAML configuration file, exiting the process on failure.

    Exits with status 1 when the file is missing, PyYAML is not
    installed, or the file cannot be parsed.
    """
    if not config_path.exists():
        print(f"Error: Configuration file not found: {config_path}", file=sys.stderr)
        sys.exit(1)
    try:
        import yaml

        with open(config_path, "r") as f:
            return yaml.safe_load(f)
    except ImportError:
        print("Error: PyYAML not installed. Install with: pip3 install pyyaml", file=sys.stderr)
        sys.exit(1)
    except Exception as e:
        print(f"Error loading configuration: {e}", file=sys.stderr)
        sys.exit(1)
def save_config(config_path: Path, config: Dict):
    """Write config as YAML, backing up any existing file first.

    Exits with status 1 on any failure (including PyYAML missing).
    """
    try:
        import yaml

        if config_path.exists():
            # Keep the previous config around as <name>.yml.backup.
            backup_path = config_path.with_suffix(".yml.backup")
            config_path.rename(backup_path)
            print(f"✅ Backup created: {backup_path}")
        with open(config_path, "w") as f:
            yaml.safe_dump(config, f, default_flow_style=False, sort_keys=False)
        print(f"✅ Configuration saved: {config_path}")
    except Exception as e:
        print(f"Error saving configuration: {e}", file=sys.stderr)
        sys.exit(1)
def get_projects(config: Dict) -> List[Dict]:
    """Return the configured projects list, or [] when none exist."""
    projects = config.get("projects")
    # Falls back to a fresh list for a missing, None, or empty entry.
    return projects if projects else []
def find_project(projects: List[Dict], project_id: str) -> Optional[Dict]:
    """Return the first project whose 'id' matches, else None."""
    return next((p for p in projects if p.get("id") == project_id), None)
def validate_project(project: Dict, allow_partial: bool = False) -> List[str]:
    """Check a project's fields and return a list of error messages.

    With allow_partial=True only the fields present are format-checked;
    otherwise missing required fields are also reported.
    """
    errors: List[str] = []
    # Presence check is skipped when editing a single field.
    if not allow_partial:
        required = ["id", "name", "type", "path", "repository", "git_platform", "test_command"]
        errors.extend(
            f"Missing required field: {field}"
            for field in required
            if not project.get(field)
        )
    # Validate field formats for whatever fields are present.
    if "id" in project:
        project_id = project["id"]
        if not project_id or not project_id.replace("-", "").replace("_", "").isalnum():
            errors.append(
                f"Invalid project ID: '{project_id}' (must be alphanumeric with dashes/underscores)"
            )
    if "git_platform" in project and project["git_platform"] not in ("github", "gitlab"):
        errors.append(
            f"Invalid git_platform: '{project['git_platform']}' (must be 'github' or 'gitlab')"
        )
    if "path" in project:
        project_path = Path(project["path"])
        if not project_path.exists():
            errors.append(f"Project path does not exist: {project['path']}")
        elif not project_path.is_dir():
            errors.append(f"Project path is not a directory: {project['path']}")
    return errors
def cmd_list(args):
    """List all projects with a short status summary.

    Prints a usage hint and returns early when none are configured.
    """
    config = load_config(args.config)
    projects = get_projects(config)
    if not projects:
        print("No projects configured.")
        print("")
        print("Add a project with:")
        print(
            f' {sys.argv[0]} add --id my-project --name "My Project" --type godot --path /path/to/project ...'
        )
        return
    print(f"\n{'='*80}")
    print(f"Lazy_Bird Projects ({len(projects)} total)")
    print(f"{'='*80}\n")
    for i, project in enumerate(projects, 1):
        # Projects default to enabled when the key is absent.
        enabled = project.get("enabled", True)
        status = "✅ ENABLED" if enabled else "❌ DISABLED"
        print(
            f"{i}. [{project.get('id', 'unknown')}] {project.get('name', 'Unnamed')} ({project.get('type', 'unknown')})"
        )
        print(f" Status: {status}")
        print(f" Path: {project.get('path', 'N/A')}")
        print(f" Repository: {project.get('repository', 'N/A')}")
        print(f" Platform: {project.get('git_platform', 'N/A')}")
        print(f" Test: {project.get('test_command', 'N/A')}")
        # Optional commands are shown only when configured.
        if project.get("build_command"):
            print(f" Build: {project['build_command']}")
        if project.get("lint_command"):
            print(f" Lint: {project['lint_command']}")
        print()
def cmd_show(args):
    """Print every field of a single project; exit 1 if not found."""
    config = load_config(args.config)
    project = find_project(get_projects(config), args.project_id)
    if project is None:
        print(f"Error: Project not found: {args.project_id}", file=sys.stderr)
        sys.exit(1)
    header = "=" * 80
    print(f"\n{header}")
    print(f"Project: {project.get('name', 'Unnamed')}")
    print(f"{header}\n")
    for key, value in project.items():
        # Render unset/empty fields explicitly.
        if value is None or value == "":
            value = "(not set)"
        print(f"{key:20s}: {value}")
    print()
def cmd_add(args):
    """Add a new project to the configuration.

    Exits with status 1 on a duplicate ID or validation errors;
    otherwise appends the project and saves the config.
    """
    config = load_config(args.config)
    # Ensure projects array exists
    if "projects" not in config:
        config["projects"] = []
    projects = config["projects"]
    # Check if project ID already exists
    if find_project(projects, args.id):
        print(f"Error: Project with ID '{args.id}' already exists", file=sys.stderr)
        sys.exit(1)
    # Build new project from CLI arguments (optional commands may be None)
    new_project = {
        "id": args.id,
        "name": args.name,
        "type": args.type,
        "path": args.path,
        "repository": args.repository,
        "git_platform": args.git_platform,
        "test_command": args.test_command,
        "build_command": args.build_command,
        "lint_command": args.lint_command,
        "format_command": args.format_command,
        "enabled": True,  # new projects start enabled
    }
    # Validate
    errors = validate_project(new_project)
    if errors:
        print("Error: Invalid project configuration:", file=sys.stderr)
        for error in errors:
            print(f" - {error}", file=sys.stderr)
        sys.exit(1)
    # Add to config
    projects.append(new_project)
    config["projects"] = projects
    # Save
    save_config(args.config, config)
    print(f"✅ Project '{args.id}' added successfully")
def cmd_remove(args):
    """Delete a project from the config, asking for confirmation first."""
    config = load_config(args.config)
    projects = get_projects(config)
    project = find_project(projects, args.project_id)
    if project is None:
        print(f"Error: Project not found: {args.project_id}", file=sys.stderr)
        sys.exit(1)
    if not args.yes:
        # Interactive confirmation unless -y/--yes was passed.
        print(f"Are you sure you want to remove project '{args.project_id}'?")
        print(f" Name: {project.get('name', 'N/A')}")
        print(f" Path: {project.get('path', 'N/A')}")
        if input("Type 'yes' to confirm: ").lower() != "yes":
            print("Cancelled.")
            return
    projects.remove(project)
    config["projects"] = projects
    save_config(args.config, config)
    print(f"✅ Project '{args.project_id}' removed successfully")
def cmd_edit(args):
    """Set a single field on a project, validating the result.

    Validation problems abort the save unless --force was given.
    """
    config = load_config(args.config)
    projects = get_projects(config)
    project = find_project(projects, args.project_id)
    if project is None:
        print(f"Error: Project not found: {args.project_id}", file=sys.stderr)
        sys.exit(1)
    # Apply the edit, remembering the previous value for the report.
    previous = project.get(args.field, "(not set)")
    project[args.field] = args.value
    print(f"Field '{args.field}':")
    print(f" Old: {previous}")
    print(f" New: {args.value}")
    problems = validate_project(project, allow_partial=True)
    if problems:
        print("Warning: Project validation issues:", file=sys.stderr)
        for problem in problems:
            print(f" - {problem}", file=sys.stderr)
        if not args.force:
            print("\nUse --force to save anyway", file=sys.stderr)
            sys.exit(1)
    save_config(args.config, config)
    print(f"✅ Project '{args.project_id}' updated successfully")
def cmd_enable(args):
    """Mark a project as enabled, saving only when something changed."""
    config = load_config(args.config)
    project = find_project(get_projects(config), args.project_id)
    if project is None:
        print(f"Error: Project not found: {args.project_id}", file=sys.stderr)
        sys.exit(1)
    # Missing "enabled" key counts as enabled: nothing to do.
    if project.get("enabled", True):
        print(f"Project '{args.project_id}' is already enabled")
        return
    project["enabled"] = True
    save_config(args.config, config)
    print(f"✅ Project '{args.project_id}' enabled")
def cmd_disable(args):
    """Mark a project as disabled, saving only when something changed."""
    config = load_config(args.config)
    project = find_project(get_projects(config), args.project_id)
    if project is None:
        print(f"Error: Project not found: {args.project_id}", file=sys.stderr)
        sys.exit(1)
    # Already disabled: nothing to save.
    if not project.get("enabled", True):
        print(f"Project '{args.project_id}' is already disabled")
        return
    project["enabled"] = False
    save_config(args.config, config)
    print(f"✅ Project '{args.project_id}' disabled")
def main():
    """Main CLI entry point.

    Builds the argparse CLI, dispatches to the cmd_* handler attached to
    each subcommand via set_defaults(func=...), and exits with status 1
    when no subcommand is given.
    """
    parser = argparse.ArgumentParser(
        description="Lazy_Bird Project Manager - Manage multiple project configurations",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
  # List all projects
  %(prog)s list
  # Show detailed info for a project
  %(prog)s show my-project
  # Add a new project
  %(prog)s add \\
    --id my-game \\
    --name "My Game Project" \\
    --type godot \\
    --path /home/user/projects/my-game \\
    --repository https://github.com/user/my-game \\
    --git-platform github \\
    --test-command "godot --headless -s addons/gdUnit4/bin/GdUnitCmdTool.gd --test-suite all"
  # Edit a project field
  %(prog)s edit my-game --field test_command --value "new test command"
  # Disable a project temporarily
  %(prog)s disable my-game
  # Re-enable a project
  %(prog)s enable my-game
  # Remove a project
  %(prog)s remove my-game
""",
    )
    # Global option shared by every subcommand.
    parser.add_argument(
        "--config",
        type=Path,
        default=Path.home() / ".config" / "lazy_birtd" / "config.yml",
        help="Path to configuration file (default: ~/.config/lazy_birtd/config.yml)",
    )
    subparsers = parser.add_subparsers(dest="command", help="Command to execute")
    # list command
    parser_list = subparsers.add_parser("list", help="List all projects")
    parser_list.set_defaults(func=cmd_list)
    # show command
    parser_show = subparsers.add_parser("show", help="Show detailed project information")
    parser_show.add_argument("project_id", help="Project ID")
    parser_show.set_defaults(func=cmd_show)
    # add command
    parser_add = subparsers.add_parser("add", help="Add a new project")
    parser_add.add_argument(
        "--id", required=True, help="Unique project ID (alphanumeric with dashes)"
    )
    parser_add.add_argument("--name", required=True, help="Project display name")
    parser_add.add_argument(
        "--type", required=True, help="Project type (godot, python, rust, nodejs, etc.)"
    )
    parser_add.add_argument("--path", required=True, help="Absolute path to project directory")
    parser_add.add_argument("--repository", required=True, help="Git repository URL")
    parser_add.add_argument(
        "--git-platform", required=True, choices=["github", "gitlab"], help="Git platform"
    )
    parser_add.add_argument("--test-command", required=True, help="Command to run tests")
    parser_add.add_argument("--build-command", help="Command to build (optional)")
    parser_add.add_argument("--lint-command", help="Command to lint code (optional)")
    parser_add.add_argument("--format-command", help="Command to format code (optional)")
    parser_add.set_defaults(func=cmd_add)
    # remove command
    parser_remove = subparsers.add_parser("remove", help="Remove a project")
    parser_remove.add_argument("project_id", help="Project ID to remove")
    parser_remove.add_argument("-y", "--yes", action="store_true", help="Skip confirmation")
    parser_remove.set_defaults(func=cmd_remove)
    # edit command
    parser_edit = subparsers.add_parser("edit", help="Edit a project field")
    parser_edit.add_argument("project_id", help="Project ID")
    parser_edit.add_argument("--field", required=True, help="Field to edit")
    parser_edit.add_argument("--value", required=True, help="New value")
    parser_edit.add_argument(
        "--force", action="store_true", help="Force save even if validation fails"
    )
    parser_edit.set_defaults(func=cmd_edit)
    # enable command
    parser_enable = subparsers.add_parser("enable", help="Enable a project")
    parser_enable.add_argument("project_id", help="Project ID to enable")
    parser_enable.set_defaults(func=cmd_enable)
    # disable command
    parser_disable = subparsers.add_parser("disable", help="Disable a project")
    parser_disable.add_argument("project_id", help="Project ID to disable")
    parser_disable.set_defaults(func=cmd_disable)
    # Parse args
    args = parser.parse_args()
    if not args.command:
        # No subcommand given: show usage and fail.
        parser.print_help()
        sys.exit(1)
    # Execute command (handler attached via set_defaults above)
    args.func(args)
if __name__ == "__main__":
main()
| #!/usr/bin/env python3
"""
Lazy_Bird Project Manager CLI
Manage multiple projects in Phase 1.1 multi-project configuration
"""
import sys
import argparse
from pathlib import Path
from typing import Dict, List, Optional
import json
def load_config(config_path: Path) -> Dict:
    """Load configuration from YAML file.

    Exits the process with status 1 when the file is missing, PyYAML is
    not installed, or parsing fails.
    """
    if not config_path.exists():
        print(f"Error: Configuration file not found: {config_path}", file=sys.stderr)
        sys.exit(1)
    try:
        # Imported lazily so the CLI can print a friendly install hint.
        import yaml

        with open(config_path, "r") as f:
            return yaml.safe_load(f)
    except ImportError:
        print("Error: PyYAML not installed. Install with: pip3 install pyyaml", file=sys.stderr)
        sys.exit(1)
    except Exception as e:
        print(f"Error loading configuration: {e}", file=sys.stderr)
        sys.exit(1)
def save_config(config_path: Path, config: Dict):
"""Save configuration to YAML file"""
try:
import yaml
# Backup existing config
if config_path.exists():
backup_path = config_path.with_suffix(".yml.backup")
config_path.rename(backup_path)
print(f"✅ Backup created: {backup_path}")
with open(config_path, "w") as f:
| [] | yusufkaraaslan/lazy-bird | scripts/project-manager.py |
#!/usr/bin/env python3
"""
Advanced test error parser with structured JSON output
Extracts structured error information from test logs including:
- Test names
- File paths and line numbers
- Error messages
- Stack traces
- Test statistics
Supports: Godot (gdUnit4), Python (pytest), Rust (cargo test)
"""
import sys
import re
import json
from pathlib import Path
from typing import Dict, List, Any, Optional
class TestErrorParser:
    """Abstract base for framework-specific test-log parsers."""

    def parse(self, log_content: str) -> Dict[str, Any]:
        """Parse a raw test log into structured error data.

        Subclasses must override this method.
        """
        raise NotImplementedError
class GodotErrorParser(TestErrorParser):
    """Parser for Godot gdUnit4 test output."""

    def parse(self, log_content: str) -> Dict[str, Any]:
        """Extract run statistics and per-test failures from a gdUnit4 log."""
        stats = {"total": 0, "passed": 0, "failed": 0, "errors": 0}
        # Summary line: "Tests: N | Passed: N | Failed: N | Errors: N"
        stats_pattern = (
            r"Tests:\s*(\d+)\s*\|\s*Passed:\s*(\d+)\s*\|\s*Failed:\s*(\d+)\s*\|\s*Errors:\s*(\d+)"
        )
        summary = re.search(stats_pattern, log_content)
        if summary:
            keys = ("total", "passed", "failed", "errors")
            stats = {key: int(value) for key, value in zip(keys, summary.groups())}
        errors = []
        # Failure line: "FAILED: test_name <message> at res://path/file.gd:42"
        failed_pattern = r"FAILED:\s+(\S+)\s+(.*?)\s+at\s+(res://[^:]+):(\d+)"
        for m in re.finditer(failed_pattern, log_content, re.DOTALL):
            errors.append(
                {
                    "test_name": m.group(1),
                    "file": m.group(3),
                    "line": int(m.group(4)),
                    "error": m.group(2).strip(),
                    "type": "test_failure",
                }
            )
        # Assertion line: "Assertion failed: <message> at res://path/file.gd:42"
        assert_pattern = r"Assertion\s+failed:?\s+(.*?)\s+at\s+(res://[^:]+):(\d+)"
        for m in re.finditer(assert_pattern, log_content):
            errors.append(
                {
                    "test_name": "assertion",
                    "file": m.group(2),
                    "line": int(m.group(3)),
                    "error": m.group(1).strip(),
                    "type": "assertion_failure",
                }
            )
        return {"framework": "godot", "stats": stats, "errors": errors, "error_count": len(errors)}
class PythonErrorParser(TestErrorParser):
    """Parser for Python pytest output"""

    def parse(self, log_content: str) -> Dict[str, Any]:
        """Parse pytest errors with stack trace extraction.

        Returns a dict with keys: framework, stats, errors, error_count.
        NOTE(review): a single failure can appear twice in `errors` when
        it matches both the FAILED summary line and an AssertionError
        trace line — confirm whether consumers de-duplicate.
        """
        errors = []
        stats = {"total": 0, "passed": 0, "failed": 0, "errors": 0}
        # Extract test statistics from summary line
        # Example: "3 failed, 10 passed in 2.5s"
        stats_pattern = r"(\d+)\s+failed.*?(\d+)\s+passed"
        stats_match = re.search(stats_pattern, log_content)
        if stats_match:
            stats["failed"] = int(stats_match.group(1))
            stats["passed"] = int(stats_match.group(2))
            stats["total"] = stats["failed"] + stats["passed"]
        # Extract failed tests with file/line info
        # Pattern: FAILED path/file.py::test_name - AssertionError: message
        failed_pattern = r"FAILED\s+([^:]+)::(\S+)\s+-\s+(.*?)$"
        for match in re.finditer(failed_pattern, log_content, re.MULTILINE):
            file_path = match.group(1)
            test_name = match.group(2)
            error_msg = match.group(3).strip()
            # Try to extract line number from stack trace — uses the first
            # "<file>:<line>:" occurrence anywhere in the log, which may
            # belong to a different failure in the same file.
            line_number = None
            stack_pattern = rf"{re.escape(file_path)}:(\d+):"
            stack_match = re.search(stack_pattern, log_content)
            if stack_match:
                line_number = int(stack_match.group(1))
            errors.append(
                {
                    "test_name": test_name,
                    "file": file_path,
                    "line": line_number,
                    "error": error_msg,
                    "type": "test_failure",
                }
            )
        # Extract AssertionError details with line numbers
        assert_pattern = r"([^:\s]+):(\d+):\s+in\s+\S+\s+.*?AssertionError:\s+(.*?)(?:\n|$)"
        for match in re.finditer(assert_pattern, log_content):
            file_path = match.group(1)
            line_number = int(match.group(2))
            error_msg = match.group(3).strip()
            errors.append(
                {
                    "test_name": "assertion",
                    "file": file_path,
                    "line": line_number,
                    "error": error_msg,
                    "type": "assertion_error",
                }
            )
        return {"framework": "python", "stats": stats, "errors": errors, "error_count": len(errors)}
class RustErrorParser(TestErrorParser):
    """Parser for Rust cargo test output"""

    def parse(self, log_content: str) -> Dict[str, Any]:
        """Parse Rust test errors.

        Supports both the pre-1.72 panic format
        (``panicked at 'msg', src/lib.rs:10:5``) and the current format
        (``panicked at src/lib.rs:10:5:`` with the message on the next line).
        """
        errors = []
        stats = {"total": 0, "passed": 0, "failed": 0, "errors": 0}

        # Extract test statistics
        # Example: "test result: FAILED. 10 passed; 3 failed; 0 ignored"
        stats_match = re.search(
            r"test\s+result:.*?(\d+)\s+passed;\s+(\d+)\s+failed", log_content
        )
        if stats_match:
            stats["passed"] = int(stats_match.group(1))
            stats["failed"] = int(stats_match.group(2))
            stats["total"] = stats["passed"] + stats["failed"]

        # Each failed test dumps a section: "---- tests::test_name stdout ----"
        # followed by its captured output (panic message and file:line)
        test_pattern = r"----\s+([\w:]+)\s+stdout\s+----\s+(.*?)(?=\n\n|$)"
        for match in re.finditer(test_pattern, log_content, re.DOTALL):
            test_name = match.group(1)
            output = match.group(2)

            # Old format: panicked at 'message', file:line:col
            panic_match = re.search(r"panicked at '([^']+)',\s+([^:]+):(\d+):\d+", output)
            if panic_match:
                error_msg = panic_match.group(1)
                file_path = panic_match.group(2)
                line_number = int(panic_match.group(3))
            else:
                # New (Rust >= 1.72) format: panicked at file:line:col:\nmessage
                panic_match = re.search(
                    r"panicked at ([^:\s]+):(\d+):\d+:\s*\n\s*(.*)", output
                )
                if not panic_match:
                    continue
                file_path = panic_match.group(1)
                line_number = int(panic_match.group(2))
                error_msg = panic_match.group(3).strip()

            errors.append(
                {
                    "test_name": test_name,
                    "file": file_path,
                    "line": line_number,
                    "error": error_msg,
                    "type": "panic",
                }
            )

        return {"framework": "rust", "stats": stats, "errors": errors, "error_count": len(errors)}
def parse_test_errors(log_path: str, project_type: str) -> Dict[str, Any]:
    """
    Parse test errors from log file based on project type

    Args:
        log_path: Path to test output log
        project_type: Type of project (godot, python, rust, etc.)

    Returns:
        Structured error data as dictionary. Always contains the keys
        ``framework``, ``stats``, ``errors`` and ``error_count``; read
        failures are reported via an ``error`` key instead of raising.
    """
    # Read log file. errors="replace" keeps parsing alive on logs with
    # mixed or invalid encodings (tool output is not guaranteed UTF-8).
    try:
        with open(log_path, "r", encoding="utf-8", errors="replace") as f:
            log_content = f.read()
    except FileNotFoundError:
        return {
            "framework": project_type,
            "error": "Log file not found",
            "stats": {},
            "errors": [],
            "error_count": 0,
        }
    except OSError as e:
        # Robustness fix: other read failures (permissions, I/O errors)
        # used to propagate; report them like a missing file instead.
        return {
            "framework": project_type,
            "error": f"Failed to read log file: {e}",
            "stats": {},
            "errors": [],
            "error_count": 0,
        }

    # Select appropriate parser
    parsers = {
        "godot": GodotErrorParser(),
        "python": PythonErrorParser(),
        "rust": RustErrorParser(),
    }

    parser = parsers.get(project_type)
    if not parser:
        # Generic fallback: no structured parsing, just the log tail
        return {
            "framework": project_type,
            "raw_output": log_content[-1000:],  # Last 1000 chars
            "stats": {},
            "errors": [],
            "error_count": 0,
        }

    return parser.parse(log_content)
def format_error_summary(error_data: Dict[str, Any]) -> str:
    """Render structured error data as a human-readable markdown summary.

    Args:
        error_data: Structured error data from parse_test_errors

    Returns:
        Formatted error summary string
    """
    out: List[str] = ["**Test Error Summary:**\n"]

    # Statistics header, if any counts were parsed
    counts = error_data.get("stats", {})
    if counts:
        out.append(
            f"Tests: {counts.get('total', 0)} | "
            f"Passed: {counts.get('passed', 0)} | "
            f"Failed: {counts.get('failed', 0)} | "
            f"Errors: {counts.get('errors', 0)}\n"
        )

    # One numbered entry per extracted error
    problems = error_data.get("errors", [])
    if problems:
        out.append(f"\n**{len(problems)} Error(s) Found:**\n")
        for idx, item in enumerate(problems, 1):
            out.append(f"\n{idx}. **{item.get('test_name', 'unknown')}**")
            if item.get("file"):
                location = f"   File: `{item['file']}"
                location += f":{item['line']}`" if item.get("line") else "`"
                out.append(location)
            out.append(f"   Error: {item.get('error', 'Unknown error')}")
            out.append("")

    return "\n".join(out)
if __name__ == "__main__":
    # CLI entry point: parse_test_errors.py <log_path> <project_type> [--json]
    if len(sys.argv) < 3:
        print("Usage: parse_test_errors.py <log_path> <project_type> [--json]")
        sys.exit(1)

    log_path = sys.argv[1]
    project_type = sys.argv[2]
    # --json switches output from markdown summary to machine-readable JSON
    output_json = "--json" in sys.argv

    # Parse errors
    error_data = parse_test_errors(log_path, project_type)

    if output_json:
        # Output as JSON
        print(json.dumps(error_data, indent=2))
    else:
        # Output as formatted summary
        print(format_error_summary(error_data))
| #!/usr/bin/env python3
"""
Advanced test error parser with structured JSON output
Extracts structured error information from test logs including:
- Test names
- File paths and line numbers
- Error messages
- Stack traces
- Test statistics
Supports: Godot (gdUnit4), Python (pytest), Rust (cargo test)
"""
import sys
import re
import json
from pathlib import Path
from typing import Dict, List, Any, Optional
class TestErrorParser:
    """Base class for test error parsing"""

    def parse(self, log_content: str) -> Dict[str, Any]:
        """Parse test log and return structured error data.

        Subclasses override this; the base implementation always raises.
        """
        raise NotImplementedError
class GodotErrorParser(TestErrorParser):
"""Parser for Godot gdUnit4 test output"""
def parse(self, log_content: str) -> Dict[str, Any]:
"""Parse Godot test errors with file/line extraction"""
errors = []
stats = {"total": 0, "passed": 0, "failed": 0, "errors": 0}
# Extract test statistics
stats_pattern = (
r"Tests:\s*(\d+)\s*\|\s*Passed:\s*(\d+)\s*\|\s*Failed:\s*(\d+)\s*\|\s*Errors:\s*(\d+)"
)
stats_match = re.search(stats_pattern, log_content)
if stats_match:
stats = {
"total": int(stats_match.group(1)),
"passed": int(stats_match.group( | [] | yusufkaraaslan/lazy-bird | scripts/parse_test_errors.py |
#!/usr/bin/env python3
"""
Lazy_Bird Issue Watcher Service
Polls GitHub/GitLab for issues labeled 'ready' and queues them for processing
Phase 1.1: Multi-project support
"""
import time
import sys
import json
import logging
import requests
from pathlib import Path
from typing import Dict, List, Optional, Set
from datetime import datetime
# Configure logging
# Logs go to stdout so a supervisor (systemd/journald, docker) captures them.
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
    handlers=[logging.StreamHandler(sys.stdout)],
)
logger = logging.getLogger("issue-watcher")
class ProjectWatcher:
    """Monitors a single project's GitHub/GitLab for ready-to-process issues.

    One instance is created per enabled project; ``IssueWatcher`` polls each
    instance in turn, parses matching issues into task dicts and queues them.
    """

    def __init__(self, project_config: Dict, global_config: Dict):
        """Initialize watcher for a single project.

        Args:
            project_config: Per-project settings from the config file.
            global_config: The full configuration (kept for shared settings).

        Raises:
            FileNotFoundError: If no API token file can be found.
        """
        self.project_config = project_config
        self.global_config = global_config

        # Project identification
        self.project_id = project_config["id"]
        self.project_name = project_config["name"]
        self.project_type = project_config["type"]
        self.project_path = Path(project_config["path"])

        # Platform ("github" / "gitlab") and repository URL or slug
        self.platform = project_config["git_platform"]
        self.repository = project_config["repository"]

        # Load API token (project-specific or shared)
        self.token = self.load_token()

        logger.info(f"  [{self.project_id}] {self.project_name} ({self.project_type})")
        logger.info(f"    Repository: {self.repository}")

    def load_token(self) -> str:
        """Load API token from secrets directory.

        Lookup order: ``<project_id>_token`` -> ``<platform>_token`` ->
        generic ``api_token``.
        """
        secrets_dir = Path.home() / ".config" / "lazy_birtd" / "secrets"

        # Try project-specific token first
        token_file = secrets_dir / f"{self.project_id}_token"
        if not token_file.exists():
            # Try platform-specific token
            token_file = secrets_dir / f"{self.platform}_token"
        if not token_file.exists():
            # Fall back to generic api_token
            token_file = secrets_dir / "api_token"
        if not token_file.exists():
            logger.error(f"[{self.project_id}] API token not found")
            logger.error(
                f"Create token file: echo 'YOUR_TOKEN' > ~/.config/lazy_birtd/secrets/api_token"
            )
            logger.error(f"Set permissions: chmod 600 ~/.config/lazy_birtd/secrets/api_token")
            raise FileNotFoundError(f"API token not found for project {self.project_id}")

        try:
            token = token_file.read_text().strip()
            if not token:
                raise ValueError(f"Token file is empty: {token_file}")
            return token
        except Exception as e:
            logger.error(f"[{self.project_id}] Failed to read token: {e}")
            raise

    def fetch_ready_issues(self) -> List[Dict]:
        """Fetch issues with 'ready' label; returns [] on any error."""
        try:
            if self.platform == "github":
                return self.fetch_github_issues()
            elif self.platform == "gitlab":
                return self.fetch_gitlab_issues()
            else:
                logger.error(f"[{self.project_id}] Unsupported platform: {self.platform}")
                return []
        except requests.exceptions.RequestException as e:
            # Network/HTTP failures are logged and treated as "no issues"
            logger.error(f"[{self.project_id}] API request failed: {e}")
            return []
        except Exception as e:
            logger.error(f"[{self.project_id}] Unexpected error fetching issues: {e}")
            return []

    def fetch_github_issues(self) -> List[Dict]:
        """Fetch 'ready' open issues from the GitHub API, oldest first."""
        # Parse owner/repo from repository URL or "owner/repo" string
        repo_parts = self.repository.rstrip("/").split("/")
        owner = repo_parts[-2]
        repo = repo_parts[-1]

        url = f"https://api.github.com/repos/{owner}/{repo}/issues"
        headers = {
            "Authorization": f"token {self.token}",
            "Accept": "application/vnd.github.v3+json",
        }
        params = {"labels": "ready", "state": "open", "sort": "created", "direction": "asc"}

        response = requests.get(url, headers=headers, params=params, timeout=30)
        response.raise_for_status()

        issues = []
        for issue in response.json():
            # Skip pull requests (they appear as issues in GitHub API)
            if "pull_request" in issue:
                continue
            issues.append(
                {
                    "id": issue["number"],
                    "title": issue["title"],
                    "body": issue["body"] or "",
                    "labels": [l["name"] for l in issue["labels"]],
                    "url": issue["html_url"],
                    "created_at": issue["created_at"],
                }
            )
        return issues

    def fetch_gitlab_issues(self) -> List[Dict]:
        """Fetch 'ready' open issues from the GitLab API, oldest first."""
        # Get project ID from config or resolve it from the repository path
        project_id = self.project_config.get("project_id")
        if not project_id:
            # Resolve the numeric project ID via the API using namespace/path
            project_path = self.repository.rstrip("/").split("/")[-2:]
            project_path_str = "/".join(project_path)
            url = f"https://gitlab.com/api/v4/projects/{requests.utils.quote(project_path_str, safe='')}"
            headers = {"PRIVATE-TOKEN": self.token}
            try:
                response = requests.get(url, headers=headers, timeout=30)
                response.raise_for_status()
                project_id = response.json()["id"]
            except Exception as e:
                logger.error(f"[{self.project_id}] Failed to get GitLab project ID: {e}")
                return []

        url = f"https://gitlab.com/api/v4/projects/{project_id}/issues"
        headers = {"PRIVATE-TOKEN": self.token}
        params = {"labels": "ready", "state": "opened", "order_by": "created_at", "sort": "asc"}

        response = requests.get(url, headers=headers, params=params, timeout=30)
        response.raise_for_status()

        issues = []
        for issue in response.json():
            issues.append(
                {
                    "id": issue["iid"],  # project-scoped issue number
                    "title": issue["title"],
                    "body": issue["description"] or "",
                    "labels": issue["labels"],
                    "url": issue["web_url"],
                    "created_at": issue["created_at"],
                }
            )
        return issues

    def parse_issue(self, issue: Dict) -> Dict:
        """Extract structured task data from an issue and add project context."""
        body = issue["body"]

        # Complexity comes from a label when present, otherwise "medium"
        complexity = "medium"  # default
        for label in issue["labels"]:
            if label in ["simple", "medium", "complex"]:
                complexity = label
                break

        # Parse sections from markdown body
        sections = self.parse_markdown_sections(body)

        # Extract detailed steps
        steps = sections.get("Detailed Steps", [])

        # Extract acceptance criteria
        acceptance_criteria = sections.get("Acceptance Criteria", [])

        # Build task with project context
        return {
            "issue_id": issue["id"],
            "title": issue["title"],
            "body": body,
            "steps": steps,
            "acceptance_criteria": acceptance_criteria,
            "complexity": complexity,
            "url": issue["url"],
            # NOTE(review): utcnow() is naive and deprecated in 3.12; kept
            # for queued_at format compatibility with existing consumers.
            "queued_at": datetime.utcnow().isoformat(),
            "platform": self.platform,
            "repository": self.repository,
            # Project context for multi-project support (Phase 1.1)
            "project_id": self.project_id,
            "project_name": self.project_name,
            "project_type": self.project_type,
            "project_path": str(self.project_path),
            "test_command": self.project_config.get("test_command"),
            "build_command": self.project_config.get("build_command"),
            "lint_command": self.project_config.get("lint_command"),
            "format_command": self.project_config.get("format_command"),
        }

    def parse_markdown_sections(self, body: str) -> Dict[str, List[str]]:
        """Split a markdown body into ``{'Header': [list items...]}``.

        Only numbered/bulleted/checkbox lines are kept as section content.
        """
        sections = {}
        current_section = None
        current_content = []

        for line in body.split("\n"):
            # Check for section headers (## Header)
            if line.strip().startswith("##"):
                # Save previous section
                if current_section:
                    sections[current_section] = current_content
                # Start new section
                current_section = line.strip().lstrip("#").strip()
                current_content = []
            elif current_section:
                # Keep only list-like lines (numbered, bulleted, checkboxes)
                stripped = line.strip()
                if stripped and (
                    stripped.startswith(
                        (
                            "1.",
                            "2.",
                            "3.",
                            "4.",
                            "5.",
                            "6.",
                            "7.",
                            "8.",
                            "9.",
                            "-",
                            "*",
                            "[ ]",
                            "[x]",
                        )
                    )
                ):
                    current_content.append(stripped)

        # Save last section
        if current_section:
            sections[current_section] = current_content

        return sections

    def update_issue_labels(self, issue: Dict):
        """Remove 'ready' label and add 'in-queue' label"""
        try:
            if self.platform == "github":
                self.update_github_labels(issue)
            elif self.platform == "gitlab":
                self.update_gitlab_labels(issue)
        except Exception as e:
            logger.error(
                f"[{self.project_id}] Failed to update labels for issue #{issue['id']}: {e}"
            )

    def update_github_labels(self, issue: Dict):
        """Update GitHub issue labels using gh CLI (ready -> in-queue)."""
        import subprocess

        repo_parts = self.repository.rstrip("/").split("/")
        owner = repo_parts[-2]
        repo = repo_parts[-1]
        repo_name = f"{owner}/{repo}"

        try:
            # Remove 'ready' label using gh CLI
            result = subprocess.run(
                [
                    "gh",
                    "issue",
                    "edit",
                    str(issue["id"]),
                    "--repo",
                    repo_name,
                    "--remove-label",
                    "ready",
                ],
                capture_output=True,
                text=True,
                timeout=30,
            )
            if result.returncode != 0:
                logger.warning(
                    f"[{self.project_id}] Failed to remove 'ready' label: {result.stderr}"
                )

            # Add 'in-queue' label using gh CLI
            result = subprocess.run(
                [
                    "gh",
                    "issue",
                    "edit",
                    str(issue["id"]),
                    "--repo",
                    repo_name,
                    "--add-label",
                    "in-queue",
                ],
                capture_output=True,
                text=True,
                timeout=30,
            )
            if result.returncode != 0:
                logger.warning(
                    f"[{self.project_id}] Failed to add 'in-queue' label: {result.stderr}"
                )
            else:
                logger.info(f"[{self.project_id}] ✅ Labels updated: ready → in-queue")

        except subprocess.TimeoutExpired:
            logger.error(f"[{self.project_id}] Timeout updating labels via gh CLI")
        except Exception as e:
            logger.error(f"[{self.project_id}] Error updating labels: {e}")

    def update_gitlab_labels(self, issue: Dict):
        """Update GitLab issue labels (ready -> in-queue)."""
        project_id = self.project_config.get("project_id")
        if not project_id:
            logger.warning(
                f"[{self.project_id}] GitLab project_id not configured, cannot update labels"
            )
            return

        headers = {"PRIVATE-TOKEN": self.token}

        # Swap 'ready' for 'in-queue'. Consistency fix: this path previously
        # added a 'processing' label, which disagreed with the GitHub path
        # and with update_issue_labels' documented behavior.
        current_labels = [l for l in issue["labels"] if l != "ready"]
        current_labels.append("in-queue")

        # Update issue with new labels
        url = f"https://gitlab.com/api/v4/projects/{project_id}/issues/{issue['id']}"
        data = {"labels": ",".join(current_labels)}

        response = requests.put(url, headers=headers, json=data, timeout=30)
        if response.status_code not in [200, 201]:
            logger.warning(
                f"[{self.project_id}] Failed to update GitLab labels: {response.status_code}"
            )
class IssueWatcher:
    """Orchestrates monitoring of multiple projects (Phase 1.1).

    Owns the poll loop: each cycle it asks every ProjectWatcher for new
    'ready' issues, queues them as JSON task files and records them as
    processed so they are not queued twice.
    """

    def __init__(self, config_path: Path):
        """Initialize multi-project watcher with configuration"""
        self.config_path = config_path
        self.config = self.load_config()

        # Polling configuration
        self.poll_interval = self.config.get("poll_interval_seconds", 60)

        # Load projects
        self.project_watchers = self.load_projects()

        # State management (per-project); keys look like "project-id:issue-number"
        self.processed_issues = self.load_processed_issues()

        logger.info(f"Issue Watcher initialized (Phase 1.1 Multi-Project)")
        logger.info(f"  Monitoring {len(self.project_watchers)} project(s)")
        logger.info(f"  Poll interval: {self.poll_interval}s")

    def load_config(self) -> Dict:
        """Load configuration from YAML or JSON file.

        Exits the process when the file is missing or unparseable.
        """
        if not self.config_path.exists():
            logger.error(f"Configuration file not found: {self.config_path}")
            sys.exit(1)

        try:
            # Support both YAML and JSON
            config_text = self.config_path.read_text()
            if self.config_path.suffix in [".yml", ".yaml"]:
                try:
                    # Imported lazily so JSON-only setups don't need PyYAML
                    import yaml

                    return yaml.safe_load(config_text)
                except ImportError:
                    logger.error("PyYAML not installed. Install with: pip3 install pyyaml")
                    sys.exit(1)
            else:
                return json.loads(config_text)
        except Exception as e:
            logger.error(f"Failed to load configuration: {e}")
            sys.exit(1)

    def load_projects(self) -> List[ProjectWatcher]:
        """Load project configurations and create watchers.

        Supports the Phase 1.1 'projects' array and, for backward
        compatibility, the legacy single-project layout. Exits when no
        usable project is configured.
        """
        watchers = []

        # Phase 1.1: Check for new 'projects' array
        if "projects" in self.config and self.config["projects"]:
            projects = self.config["projects"]
            logger.info(f"Loading {len(projects)} project(s) from 'projects' array")

            for project in projects:
                # Skip disabled projects
                if not project.get("enabled", True):
                    logger.info(f"  [{project['id']}] Skipping (disabled)")
                    continue

                # Validate required fields
                required = [
                    "id",
                    "name",
                    "type",
                    "path",
                    "repository",
                    "git_platform",
                    "test_command",
                ]
                missing = [f for f in required if f not in project]
                if missing:
                    logger.error(
                        f"  [{project.get('id', 'unknown')}] Missing required fields: {missing}"
                    )
                    continue

                try:
                    watcher = ProjectWatcher(project, self.config)
                    watchers.append(watcher)
                except Exception as e:
                    # A bad project (e.g. missing token) is skipped, not fatal
                    logger.error(f"  [{project['id']}] Failed to initialize: {e}")
                    continue

        # BACKWARD COMPATIBILITY: Legacy single-project format
        elif "project" in self.config or "repository" in self.config:
            logger.info("Loading single project (legacy format)")

            # Build project config from legacy format
            legacy_project = {
                "id": "default",
                "name": self.config.get("project", {}).get("name", "Default Project"),
                "type": self.config.get("project", {}).get("type", "godot"),
                "path": self.config.get("project", {}).get("path", "."),
                "repository": self.config.get("repository", ""),
                "git_platform": self.config.get("git_platform", "github"),
                "test_command": self.config.get("test_command", ""),
                "build_command": self.config.get("build_command"),
                "lint_command": self.config.get("lint_command"),
                "format_command": self.config.get("format_command"),
                "enabled": True,
            }

            try:
                watcher = ProjectWatcher(legacy_project, self.config)
                watchers.append(watcher)
            except Exception as e:
                logger.error(f"Failed to initialize legacy project: {e}")
                sys.exit(1)

        else:
            logger.error("No projects configured!")
            logger.error("Add a 'projects' array to your config.yml")
            logger.error("See: config/config.example.yml for examples")
            sys.exit(1)

        if not watchers:
            logger.error("No enabled projects found")
            sys.exit(1)

        return watchers

    def load_processed_issues(self) -> Set[str]:
        """Load set of already-processed issue IDs (format: project-id:issue-number)"""
        data_dir = Path.home() / ".config" / "lazy_birtd" / "data"
        data_dir.mkdir(parents=True, exist_ok=True)

        processed_file = data_dir / "processed_issues.json"
        if processed_file.exists():
            try:
                data = json.loads(processed_file.read_text())
                return set(data)
            except Exception as e:
                # Corrupt state file: start fresh rather than crash
                logger.warning(f"Failed to load processed issues: {e}")
                return set()
        return set()

    def save_processed_issues(self):
        """Save processed issue IDs to disk"""
        data_dir = Path.home() / ".config" / "lazy_birtd" / "data"
        data_dir.mkdir(parents=True, exist_ok=True)

        processed_file = data_dir / "processed_issues.json"
        try:
            processed_file.write_text(json.dumps(list(self.processed_issues), indent=2))
        except Exception as e:
            logger.error(f"Failed to save processed issues: {e}")

    def queue_task(self, parsed_issue: Dict, project_watcher: ProjectWatcher):
        """Add task to processing queue with project context.

        Writes the parsed issue as a JSON file into the queue directory,
        falling back to a per-user directory when /var/lib is not writable.
        """
        queue_dir = Path("/var/lib/lazy_birtd/queue")

        # Create queue directory if it doesn't exist
        try:
            queue_dir.mkdir(parents=True, exist_ok=True)
        except PermissionError:
            # Fall back to user directory if /var/lib not writable
            queue_dir = Path.home() / ".config" / "lazy_birtd" / "queue"
            queue_dir.mkdir(parents=True, exist_ok=True)
            logger.warning(f"Using fallback queue directory: {queue_dir}")

        # Use project-id:issue-number for unique task file naming
        task_id = f"{parsed_issue['project_id']}-{parsed_issue['issue_id']}"
        task_file = queue_dir / f"task-{task_id}.json"

        try:
            task_file.write_text(json.dumps(parsed_issue, indent=2))
            logger.info(
                f"[{project_watcher.project_id}] ✅ Queued task #{parsed_issue['issue_id']}: {parsed_issue['title']}"
            )
        except Exception as e:
            logger.error(f"[{project_watcher.project_id}] Failed to queue task: {e}")
            raise

    def run(self):
        """Main loop - poll all projects for issues and process them"""
        project_names = ", ".join([pw.project_name for pw in self.project_watchers])
        logger.info(f"🔍 Issue Watcher started (Phase 1.1 Multi-Project)")
        logger.info(f"  Projects: {project_names}")
        logger.info(f"  Polling every {self.poll_interval} seconds")
        logger.info(f"  Press Ctrl+C to stop")
        logger.info("")

        while True:
            try:
                total_new_issues = 0

                # Poll each project in sequence
                for project_watcher in self.project_watchers:
                    try:
                        # Fetch issues with 'ready' label for this project
                        issues = project_watcher.fetch_ready_issues()

                        # Filter out already-processed issues (using project-id:issue-number format)
                        new_issues = []
                        for issue in issues:
                            issue_key = f"{project_watcher.project_id}:{issue['id']}"
                            if issue_key not in self.processed_issues:
                                new_issues.append(issue)

                        if new_issues:
                            logger.info(
                                f"[{project_watcher.project_id}] Found {len(new_issues)} new task(s)"
                            )
                            total_new_issues += len(new_issues)

                        # Process each new issue
                        for issue in new_issues:
                            logger.info(
                                f"[{project_watcher.project_id}] Processing issue #{issue['id']}: {issue['title']}"
                            )

                            # Parse issue into task format (includes project context)
                            parsed = project_watcher.parse_issue(issue)

                            # Queue the task
                            self.queue_task(parsed, project_watcher)

                            # Update labels on the issue
                            project_watcher.update_issue_labels(issue)

                            # Mark as processed (project-id:issue-number) and persist
                            # immediately so a crash cannot re-queue the same issue
                            issue_key = f"{project_watcher.project_id}:{issue['id']}"
                            self.processed_issues.add(issue_key)
                            self.save_processed_issues()

                            logger.info(
                                f"[{project_watcher.project_id}] ✅ Issue #{issue['id']} queued successfully"
                            )

                    except Exception as e:
                        logger.error(
                            f"[{project_watcher.project_id}] Error processing project: {e}"
                        )
                        # Continue to next project instead of crashing

                # Log summary if any new issues found
                if total_new_issues == 0:
                    logger.debug(
                        f"No new issues found across {len(self.project_watchers)} projects"
                    )

                # Sleep until next poll
                time.sleep(self.poll_interval)

            except KeyboardInterrupt:
                logger.info("\n👋 Shutting down gracefully...")
                break
            except Exception as e:
                logger.error(f"❌ Unexpected error in main loop: {e}")
                logger.info(f"Retrying in {self.poll_interval} seconds...")
                time.sleep(self.poll_interval)
def main():
    """Entry point: locate the config file and start the watcher."""
    # Look for config file (YAML preferred)
    config_path = Path.home() / ".config" / "lazy_birtd" / "config.yml"

    # Also check for .json extension
    if not config_path.exists():
        config_path = Path.home() / ".config" / "lazy_birtd" / "config.json"

    if not config_path.exists():
        # Print a ready-to-paste minimal config before bailing out
        logger.error("Configuration file not found")
        logger.error(f"Expected: ~/.config/lazy_birtd/config.yml")
        logger.error("")
        logger.error("Create configuration with:")
        logger.error("  mkdir -p ~/.config/lazy_birtd")
        logger.error("  cp config/config.example.yml ~/.config/lazy_birtd/config.yml")
        logger.error("")
        logger.error("Or create minimal config:")
        logger.error("  cat > ~/.config/lazy_birtd/config.yml << 'EOF'")
        logger.error("  projects:")
        logger.error("    - id: my-project")
        logger.error("      name: My Project")
        logger.error("      type: godot")
        logger.error("      path: /path/to/project")
        logger.error("      repository: https://github.com/owner/repo")
        logger.error("      git_platform: github")
        logger.error(
            "      test_command: 'godot --headless -s addons/gdUnit4/bin/GdUnitCmdTool.gd --test-suite all'"
        )
        logger.error("      build_command: null")
        logger.error("      enabled: true")
        logger.error("  poll_interval_seconds: 60")
        logger.error("  EOF")
        sys.exit(1)

    # Create and run watcher
    watcher = IssueWatcher(config_path)
    watcher.run()


if __name__ == "__main__":
    main()
| #!/usr/bin/env python3
"""
Lazy_Bird Issue Watcher Service
Polls GitHub/GitLab for issues labeled 'ready' and queues them for processing
Phase 1.1: Multi-project support
"""
import time
import sys
import json
import logging
import requests
from pathlib import Path
from typing import Dict, List, Optional, Set
from datetime import datetime
# Configure logging
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
handlers=[logging.StreamHandler(sys.stdout)],
)
logger = logging.getLogger("issue-watcher")
class ProjectWatcher:
"""Monitors a single project's GitHub/GitLab for ready-to-process issues"""
def __init__(self, project_config: Dict, global_config: Dict):
"""Initialize watcher for a single project"""
self.project_config = project_config
self.global_config = global_config
# Project identification
self.project_id = project_config["id"]
self.project_name = project_config["name"]
self.project_type = project_config["type"]
self.project_path = Path(project_config["path"])
# Platform and repository
self.platform = project_config["git_platform"]
self.repository = project_config["repository"]
# Load API token (project-specific or shared)
self.token = self.load_token()
logger.info(f" [{self.project_id}] {self.project_name} ({self.project | [] | yusufkaraaslan/lazy-bird | scripts/issue-watcher.py |
#!/usr/bin/env python3
"""
Godot Server - Test Coordination Service
========================================
HTTP API server that queues and executes Godot tests sequentially,
allowing multiple Claude Code agents to coordinate test execution safely.
Features:
- REST API for test submission and status checking
- Sequential test execution (one at a time)
- Job queue management with priorities
- Test result parsing (gdUnit4, GUT)
- Timeout enforcement
- Resource monitoring
- Artifact storage
Usage:
python3 godot-server.py [--port 5000] [--host 127.0.0.1]
API Endpoints:
POST /test/submit - Submit new test job
GET /test/status/<id> - Check job status
GET /test/results/<id> - Get detailed results
DELETE /test/cancel/<id> - Cancel queued/running job
GET /health - Health check
GET /queue - View current queue
"""
import os
import sys
import json
import time
import uuid
import subprocess
import threading
import logging
import xml.etree.ElementTree as ET
from datetime import datetime, timedelta
from enum import Enum
from dataclasses import dataclass, asdict
from typing import Optional, Dict, List, Any
from queue import Queue, Empty
from pathlib import Path
try:
from flask import Flask, request, jsonify, send_file
import psutil
except ImportError:
print("Error: Required packages not installed")
print("Install: pip3 install flask psutil")
sys.exit(1)
# Configuration
# Default artifacts directory (can be overridden by env var or command line)
DEFAULT_ARTIFACTS_DIR = os.environ.get(
    "LAZY_BIRD_ARTIFACTS_DIR", str(Path.home() / ".local/share/lazy_birtd/tests")
)
MAX_QUEUE_SIZE = 50  # cap on jobs retained in the registry / accepted at once
DEFAULT_TIMEOUT = 300  # 5 minutes
JOB_RETENTION_DAYS = 7  # completed jobs older than this are purged

# Logging setup
logging.basicConfig(
    level=logging.INFO, format="%(asctime)s - %(name)s - %(levelname)s - %(message)s"
)
logger = logging.getLogger("godot-server")

# Flask app
app = Flask(__name__)
class JobStatus(Enum):
    """Job lifecycle states"""

    QUEUED = "queued"  # accepted, waiting for the executor
    RUNNING = "running"  # currently executing
    COMPLETE = "complete"  # ran to the end (job.result says passed/failed)
    FAILED = "failed"  # infrastructure error (bad path, crash, ...)
    TIMEOUT = "timeout"  # exceeded timeout_seconds
    CANCELLED = "cancelled"  # cancelled while still queued
class Priority(Enum):
    """Job priority levels (lower value = higher priority).

    NOTE(review): JobQueue is a plain FIFO Queue; priority is recorded on
    jobs but not yet used for ordering -- confirm before relying on it.
    """

    HIGH = 1  # Retry attempts, critical fixes
    NORMAL = 2  # Regular tasks
    LOW = 3  # Non-blocking refactors
@dataclass
class TestJob:
    """Test job specification and full lifecycle record.

    One dataclass carries the request (project, suite, timeout), the status
    timestamps, parsed results and artifact paths.
    """

    # Request
    job_id: str
    project_path: str
    test_suite: str = "all"
    framework: str = "gdUnit4"
    timeout_seconds: int = DEFAULT_TIMEOUT
    agent_id: Optional[str] = None  # identifier of the submitting agent
    task_id: Optional[int] = None  # issue/task number this run belongs to
    callback_url: Optional[str] = None  # NOTE(review): not used in visible code -- confirm
    priority: Priority = Priority.NORMAL

    # Status tracking
    status: JobStatus = JobStatus.QUEUED
    submitted_at: Optional[datetime] = None
    started_at: Optional[datetime] = None
    completed_at: Optional[datetime] = None

    # Results
    result: Optional[str] = None  # "passed" or "failed"
    tests_run: int = 0
    tests_passed: int = 0
    tests_failed: int = 0
    output: str = ""  # combined stdout+stderr of the test run
    error_message: str = ""

    # Artifacts
    log_path: Optional[str] = None  # raw output log file
    junit_path: Optional[str] = None  # JUnit XML report, when produced
@dataclass
class TestSummary:
    """Aggregate counts for one test execution."""

    total: int = 0
    passed: int = 0
    failed: int = 0
    skipped: int = 0
    errors: int = 0
class JobQueue:
    """Thread-safe FIFO job queue with a persistent job registry.

    ``self.jobs`` retains every submitted job (including finished ones) for
    status lookups until cleanup_old_jobs purges them; the underlying
    ``Queue`` holds only jobs awaiting execution.
    """

    def __init__(self, maxsize: int = MAX_QUEUE_SIZE):
        self.queue = Queue(maxsize=maxsize)
        # Registry of all known jobs (queued, running and finished)
        self.jobs: Dict[str, TestJob] = {}
        # The job currently handed to the executor, if any
        self.active_job: Optional[TestJob] = None
        self.lock = threading.Lock()

    def submit(self, job: TestJob) -> bool:
        """Submit a new job; returns False when capacity is reached."""
        try:
            with self.lock:
                # Capacity check counts all retained jobs (finished ones
                # included) until cleanup_old_jobs runs.
                if len(self.jobs) >= MAX_QUEUE_SIZE:
                    return False
                job.submitted_at = datetime.now()
                self.jobs[job.job_id] = job
                self.queue.put(job, block=False)
                logger.info(f"Job {job.job_id} submitted (task #{job.task_id})")
                return True
        except Exception as e:
            logger.error(f"Failed to submit job: {e}")
            return False

    def get_next(self, timeout: float = 1.0) -> Optional[TestJob]:
        """Get the next runnable job (blocking with timeout).

        Bug fix: jobs cancelled while still queued are skipped here instead
        of being handed to the executor -- cancel_job only flips the status,
        the job object stays in the underlying Queue.
        """
        try:
            while True:
                job = self.queue.get(timeout=timeout)
                with self.lock:
                    if job.status is not JobStatus.QUEUED:
                        continue  # cancelled while waiting; discard
                    self.active_job = job
                return job
        except Empty:
            return None

    def get_job(self, job_id: str) -> Optional[TestJob]:
        """Get job by ID (None when unknown)."""
        with self.lock:
            return self.jobs.get(job_id)

    def update_job(self, job: TestJob):
        """Update job in registry"""
        with self.lock:
            self.jobs[job.job_id] = job

    def cancel_job(self, job_id: str) -> bool:
        """Cancel a queued job; returns False for unknown or running jobs."""
        with self.lock:
            job = self.jobs.get(job_id)
            if not job:
                return False
            if job.status == JobStatus.QUEUED:
                job.status = JobStatus.CANCELLED
                job.completed_at = datetime.now()
                return True
            # Cannot cancel running jobs (would require process tracking)
            return False

    def get_queue_position(self, job_id: str) -> int:
        """Get position in queue (1-indexed); 0 when not queued."""
        with self.lock:
            queued_jobs = [j for j in self.jobs.values() if j.status == JobStatus.QUEUED]
            # submitted_at is always set for queued jobs (see submit)
            queued_jobs.sort(key=lambda j: j.submitted_at)
            for i, job in enumerate(queued_jobs, 1):
                if job.job_id == job_id:
                    return i
            return 0

    def get_queue_depth(self) -> int:
        """Get number of queued jobs"""
        with self.lock:
            return sum(1 for j in self.jobs.values() if j.status == JobStatus.QUEUED)

    def cleanup_old_jobs(self, days: int = JOB_RETENTION_DAYS):
        """Drop finished jobs older than *days* from the registry."""
        cutoff = datetime.now() - timedelta(days=days)
        with self.lock:
            old_jobs = [
                jid
                for jid, job in self.jobs.items()
                if job.completed_at and job.completed_at < cutoff
            ]
            for jid in old_jobs:
                del self.jobs[jid]
                logger.info(f"Cleaned up old job {jid}")
class TestExecutor:
"""Executes Godot tests and parses results"""
    def __init__(self, artifacts_dir: Path):
        # Root directory; each job gets its own subfolder for artifacts.
        self.artifacts_dir = artifacts_dir
        self.artifacts_dir.mkdir(parents=True, exist_ok=True)
        # Handle to the running test process; not used by the visible code
        # (execute() uses subprocess.run) -- presumably reserved for
        # cancellation support. TODO confirm.
        self.current_process: Optional[subprocess.Popen] = None
def execute(self, job: TestJob) -> TestJob:
"""Execute a test job"""
logger.info(f"Executing job {job.job_id}: {job.project_path}")
job.status = JobStatus.RUNNING
job.started_at = datetime.now()
# Validate project path
if not os.path.exists(job.project_path):
job.status = JobStatus.FAILED
job.error_message = f"Project path not found: {job.project_path}"
job.completed_at = datetime.now()
return job
# Create artifacts directory for this job
job_artifacts_dir = self.artifacts_dir / job.job_id
job_artifacts_dir.mkdir(parents=True, exist_ok=True)
# Build command based on framework
cmd = self._build_command(job, job_artifacts_dir)
if not cmd:
job.status = JobStatus.FAILED
job.error_message = f"Unsupported framework: {job.framework}"
job.completed_at = datetime.now()
return job
# Execute with timeout
try:
logger.info(f"Running command: {' '.join(cmd)}")
result = subprocess.run(
cmd,
capture_output=True,
text=True,
timeout=job.timeout_seconds,
cwd=job.project_path,
)
job.output = result.stdout + result.stderr
# Save output to file
log_file = job_artifacts_dir / "output.log"
log_file.write_text(job.output)
job.log_path = str(log_file)
# Parse results
self._parse_results(job, job_artifacts_dir)
job.status = JobStatus.COMPLETE
job.completed_at = datetime.now()
logger.info(
f"Job {job.job_id} completed: {job.result} "
f"({job.tests_passed}/{job.tests_run} passed)"
)
except subprocess.TimeoutExpired:
job.status = JobStatus.TIMEOUT
job.error_message = f"Test execution exceeded {job.timeout_seconds}s timeout"
job.completed_at = datetime.now()
logger.warning(f"Job {job.job_id} timed out")
except Exception as e:
job.status = JobStatus.FAILED
job.error_message = str(e)
job.completed_at = datetime.now()
logger.error(f"Job {job.job_id} failed: {e}")
return job
def _build_command(self, job: TestJob, artifacts_dir: Path) -> Optional[List[str]]:
"""Build Godot command based on framework"""
if job.framework == "gdUnit4":
cmd = [
"godot",
"--path",
job.project_path,
"--headless",
"-s",
"res://addons/gdUnit4/bin/GdUnitCmdTool.gd",
"--ignoreHeadlessMode",
]
# Add test suite specification
if job.test_suite and job.test_suite != "all":
cmd.extend(["--test-suite", job.test_suite])
else:
cmd.extend(["-a", "test/"])
# Add JUnit XML output
junit_file = artifacts_dir / "results.xml"
cmd.extend(["--report-format", "junit", "--report-path", str(junit_file)])
job.junit_path = str(junit_file)
elif job.framework == "GUT":
cmd = [
"godot",
"--path",
job.project_path,
"--headless",
"-s",
"res://addons/gut/gut_cmdln.gd",
"-gdir=res://test",
]
if job.test_suite and job.test_suite != "all":
cmd.append(f"-gtest={job.test_suite}")
else:
return None
return cmd
def _parse_results(self, job: TestJob, artifacts_dir: Path):
"""Parse test results"""
if job.framework == "gdUnit4":
self._parse_gdunit4(job, artifacts_dir)
elif job.framework == "GUT":
self._parse_gut(job)
else:
# Fallback: parse from output
self._parse_generic(job)
def _parse_gdunit4(self, job: TestJob, artifacts_dir: Path):
"""Parse gdUnit4 JUnit XML results"""
junit_file = artifacts_dir / "results.xml"
if not junit_file.exists():
logger.warning(f"JUnit file not found: {junit_file}")
self._parse_generic(job)
return
try:
tree = ET.parse(junit_file)
root = tree.getroot()
# Extract summary
testsuite = root.find("testsuite")
if testsuite is not None:
job.tests_run = int(testsuite.get("tests", 0))
job.tests_failed = int(testsuite.get("failures", 0)) + int(
testsuite.get("errors", 0)
)
job.tests_passed = job.tests_run - job.tests_failed
job.result = "passed" if job.tests_failed == 0 else "failed"
except Exception as e:
logger.error(f"Failed to parse JUnit XML: {e}")
self._parse_generic(job)
def _parse_gut(self, job: TestJob):
"""Parse GUT plain text output"""
# GUT output format: "Tests run: X Passing: Y Failing: Z"
import re
match = re.search(r"Tests run:\s*(\d+)\s+Passing:\s*(\d+)\s+Failing:\s*(\d+)", job.output)
if match:
job.tests_run = int(match.group(1))
job.tests_passed = int(match.group(2))
job.tests_failed = int(match.group(3))
job.result = "passed" if job.tests_failed == 0 else "failed"
else:
self._parse_generic(job)
def _parse_generic(self, job: TestJob):
"""Fallback parser - look for common patterns"""
import re
# Try to find test counts in output
patterns = [
r"(\d+)\s+tests.*(\d+)\s+passed.*(\d+)\s+failed",
r"Tests:\s*(\d+),\s*Passed:\s*(\d+),\s*Failed:\s*(\d+)",
r"PASSED.*=\s*(\d+)",
]
for pattern in patterns:
match = re.search(pattern, job.output, re.IGNORECASE)
if match:
if len(match.groups()) >= 3:
job.tests_run = int(match.group(1))
job.tests_passed = int(match.group(2))
job.tests_failed = int(match.group(3))
elif len(match.groups()) == 1:
job.tests_passed = int(match.group(1))
job.tests_run = job.tests_passed
job.tests_failed = 0
job.result = "passed" if job.tests_failed == 0 else "failed"
return
# Couldn't parse - check exit code heuristic
if "All tests passed" in job.output or "OK" in job.output:
job.result = "passed"
else:
job.result = "failed"
# Global job queue and executor
job_queue = JobQueue()
executor = None  # Initialized in main() once the artifacts dir is known
server_start_time = datetime.now()  # Basis for the /health uptime figure
total_jobs_processed = 0  # Incremented by the worker thread after each job
def worker_thread():
    """Background worker that processes jobs from the queue.

    Runs forever in a daemon thread: pulls the next job, executes it via
    the global executor, persists the result, clears the active-job
    marker, and fires the optional HTTP completion callback. Exceptions
    are logged and the loop continues.
    """
    global total_jobs_processed
    logger.info("Worker thread started")
    while True:
        try:
            # Get next job (blocks for up to 1 second)
            job = job_queue.get_next(timeout=1.0)
            if job:
                # Execute the job and persist its final state
                job = executor.execute(job)
                job_queue.update_job(job)
                total_jobs_processed += 1
                # BUG FIX: clear the active-job marker so /health and
                # /queue do not keep reporting a finished job as active.
                with job_queue.lock:
                    if job_queue.active_job is job:
                        job_queue.active_job = None
                # Send the completion callback if one was requested
                if job.callback_url:
                    try:
                        import requests

                        # BUG FIX: asdict(job) contains datetime and Enum
                        # values that the default JSON encoder rejects, so
                        # `json=asdict(job)` raised TypeError. Serialize
                        # Enums to their values and everything else
                        # non-standard via str() before posting.
                        payload = json.loads(
                            json.dumps(
                                asdict(job),
                                default=lambda o: o.value
                                if isinstance(o, Enum)
                                else str(o),
                            )
                        )
                        requests.post(job.callback_url, json=payload, timeout=5)
                    except Exception as e:
                        logger.error(f"Failed to send callback: {e}")
        except Exception as e:
            logger.error(f"Worker thread error: {e}")
            time.sleep(1)
# API Endpoints
@app.route("/test/submit", methods=["POST"])
def submit_test():
    """Submit a new test job.

    Expects a JSON body with at least ``project_path``. Optional fields:
    test_suite, framework, timeout_seconds, agent_id, task_id,
    callback_url, priority (a Priority member name, default "NORMAL").

    Returns:
        202 with job_id / queue position on success,
        400 on missing/invalid input,
        503 when the queue is full,
        500 on unexpected errors.
    """
    try:
        # BUG FIX: request.json raises (or yields None) for a missing or
        # non-JSON body, which surfaced as a 500; report a 400 instead.
        data = request.get_json(silent=True)
        if not data:
            return jsonify({"error": "JSON body is required"}), 400
        # Validate required fields
        if not data.get("project_path"):
            return jsonify({"error": "project_path is required"}), 400
        # BUG FIX: an unknown priority name used to raise KeyError and
        # come back as a 500; validate it explicitly as a client error.
        priority_name = data.get("priority", "NORMAL")
        if priority_name not in Priority.__members__:
            return jsonify({"error": f"Invalid priority: {priority_name}"}), 400
        # Create job
        job = TestJob(
            job_id=str(uuid.uuid4()),
            project_path=data["project_path"],
            test_suite=data.get("test_suite", "all"),
            framework=data.get("framework", "gdUnit4"),
            timeout_seconds=data.get("timeout_seconds", DEFAULT_TIMEOUT),
            agent_id=data.get("agent_id"),
            task_id=data.get("task_id"),
            callback_url=data.get("callback_url"),
            priority=Priority[priority_name],
        )
        # Submit to queue
        if not job_queue.submit(job):
            return jsonify({"error": "Queue is full"}), 503
        # Return response
        position = job_queue.get_queue_position(job.job_id)
        estimated_wait = position * 120  # Rough estimate: 2 min per job
        return (
            jsonify(
                {
                    "job_id": job.job_id,
                    "status": job.status.value,
                    "queue_position": position,
                    "estimated_wait_seconds": estimated_wait,
                }
            ),
            202,
        )
    except Exception as e:
        logger.error(f"Submit error: {e}")
        return jsonify({"error": str(e)}), 500
@app.route("/test/status/<job_id>", methods=["GET"])
def get_status(job_id: str):
    """Report the current status of a job; 404 when the ID is unknown."""
    job = job_queue.get_job(job_id)
    if job is None:
        return jsonify({"error": "Job not found"}), 404

    payload = {
        "job_id": job.job_id,
        "status": job.status.value,
        "submitted_at": job.submitted_at.isoformat() if job.submitted_at else None,
    }

    if job.status == JobStatus.QUEUED:
        # Still waiting: report where the job sits in line.
        payload["queue_position"] = job_queue.get_queue_position(job_id)
    elif job.status == JobStatus.RUNNING:
        # In flight: report elapsed time against the timeout budget.
        payload["started_at"] = job.started_at.isoformat() if job.started_at else None
        if job.started_at:
            payload["elapsed_seconds"] = int(
                (datetime.now() - job.started_at).total_seconds()
            )
        payload["timeout_seconds"] = job.timeout_seconds
    elif job.status in (JobStatus.COMPLETE, JobStatus.FAILED, JobStatus.TIMEOUT):
        # Terminal: report outcome, timing, and artifact locations.
        payload["result"] = job.result
        payload["started_at"] = job.started_at.isoformat() if job.started_at else None
        payload["completed_at"] = (
            job.completed_at.isoformat() if job.completed_at else None
        )
        payload["tests_run"] = job.tests_run
        payload["tests_passed"] = job.tests_passed
        payload["tests_failed"] = job.tests_failed
        if job.started_at and job.completed_at:
            payload["duration_seconds"] = int(
                (job.completed_at - job.started_at).total_seconds()
            )
        if job.log_path:
            payload["artifacts"] = {"log": job.log_path, "junit": job.junit_path}
        if job.error_message:
            payload["error_message"] = job.error_message

    return jsonify(payload)
@app.route("/test/results/<job_id>", methods=["GET"])
def get_results(job_id: str):
    """Get detailed test results for a finished job.

    Returns 404 for unknown IDs and 400 while the job has not yet
    reached a terminal state.
    """
    job = job_queue.get_job(job_id)
    if not job:
        return jsonify({"error": "Job not found"}), 404
    # BUG FIX: TIMEOUT is a terminal state (get_status treats it as such),
    # but it was excluded here, so timed-out jobs could never retrieve
    # their captured output.
    if job.status not in (JobStatus.COMPLETE, JobStatus.FAILED, JobStatus.TIMEOUT):
        return jsonify({"error": "Job not complete"}), 400
    return jsonify(
        {
            "job_id": job.job_id,
            "result": job.result,
            "summary": {
                "total": job.tests_run,
                "passed": job.tests_passed,
                "failed": job.tests_failed,
            },
            "output": job.output,
            "artifacts": {"log": job.log_path, "junit": job.junit_path},
        }
    )
@app.route("/test/cancel/<job_id>", methods=["DELETE"])
def cancel_test(job_id: str):
    """Cancel a queued test job.

    Only QUEUED jobs can be cancelled; unknown IDs and already-running
    jobs yield a 400 response.
    """
    cancelled = job_queue.cancel_job(job_id)
    if not cancelled:
        return jsonify({"error": "Cannot cancel job (not found or already running)"}), 400
    return jsonify(
        {
            "job_id": job_id,
            "status": "cancelled",
            "cancelled_at": datetime.now().isoformat(),
        }
    )
@app.route("/health", methods=["GET"])
def health():
    """Health check endpoint.

    Reports server uptime, the installed Godot version (best-effort),
    queue depth, the currently active job, and the lifetime job count.
    """
    uptime = (datetime.now() - server_start_time).total_seconds()
    # Probe the Godot binary; tolerate a missing or unresponsive install.
    godot_version = "unknown"
    try:
        result = subprocess.run(["godot", "--version"], capture_output=True, text=True, timeout=5)
        if result.returncode == 0:
            godot_version = result.stdout.strip()
    # BUG FIX: the bare `except:` also swallowed SystemExit and
    # KeyboardInterrupt; catch only ordinary errors.
    except Exception:
        pass
    return jsonify(
        {
            "status": "healthy",
            "godot_version": godot_version,
            "uptime_seconds": int(uptime),
            "queue_depth": job_queue.get_queue_depth(),
            "active_job": job_queue.active_job.job_id if job_queue.active_job else None,
            "total_jobs_processed": total_jobs_processed,
        }
    )
@app.route("/queue", methods=["GET"])
def view_queue():
    """Snapshot of the active job plus all queued jobs in queue order."""
    active = None
    current = job_queue.active_job
    if current:
        active = {
            "job_id": current.job_id,
            "agent_id": current.agent_id,
            "task_id": current.task_id,
            "started_at": current.started_at.isoformat() if current.started_at else None,
        }
        if current.started_at:
            active["elapsed_seconds"] = int(
                (datetime.now() - current.started_at).total_seconds()
            )
    queued_jobs = []
    with job_queue.lock:
        # Order waiting jobs by submission time, matching get_queue_position.
        waiting = sorted(
            (j for j in job_queue.jobs.values() if j.status == JobStatus.QUEUED),
            key=lambda j: j.submitted_at,
        )
        for position, queued_job in enumerate(waiting, 1):
            queued_jobs.append(
                {
                    "job_id": queued_job.job_id,
                    "agent_id": queued_job.agent_id,
                    "task_id": queued_job.task_id,
                    "position": position,
                    "submitted_at": queued_job.submitted_at.isoformat()
                    if queued_job.submitted_at
                    else None,
                }
            )
    return jsonify({"active": active, "queued": queued_jobs, "total_queued": len(queued_jobs)})
def main():
    """Main entry point: parse CLI args, start the worker, run Flask."""
    import argparse

    global executor

    parser = argparse.ArgumentParser(description="Godot Test Coordination Server")
    parser.add_argument("--host", default="127.0.0.1", help="Host to bind to (default: 127.0.0.1)")
    parser.add_argument("--port", type=int, default=5000, help="Port to listen on (default: 5000)")
    parser.add_argument(
        "--artifacts-dir",
        default=DEFAULT_ARTIFACTS_DIR,
        help=f"Directory for test artifacts (default: {DEFAULT_ARTIFACTS_DIR})",
    )
    parser.add_argument("--debug", action="store_true", help="Enable debug mode")
    options = parser.parse_args()

    # Wire up the executor before the worker thread can pick up jobs.
    artifacts_path = Path(options.artifacts_dir)
    executor = TestExecutor(artifacts_path)

    # Background worker drains the queue; daemon so Ctrl-C exits cleanly.
    threading.Thread(target=worker_thread, daemon=True).start()

    logger.info(f"Starting Godot Server on {options.host}:{options.port}")
    logger.info(f"Artifacts directory: {artifacts_path}")
    app.run(host=options.host, port=options.port, debug=options.debug, threaded=True)
# Allow running the server directly: python3 godot-server.py [options]
if __name__ == "__main__":
    main()
| #!/usr/bin/env python3
"""
Godot Server - Test Coordination Service
========================================
HTTP API server that queues and executes Godot tests sequentially,
allowing multiple Claude Code agents to coordinate test execution safely.
Features:
- REST API for test submission and status checking
- Sequential test execution (one at a time)
- Job queue management with priorities
- Test result parsing (gdUnit4, GUT)
- Timeout enforcement
- Resource monitoring
- Artifact storage
Usage:
python3 godot-server.py [--port 5000] [--host 127.0.0.1]
API Endpoints:
POST /test/submit - Submit new test job
GET /test/status/<id> - Check job status
GET /test/results/<id> - Get detailed results
DELETE /test/cancel/<id> - Cancel queued/running job
GET /health - Health check
GET /queue - View current queue
"""
import os
import sys
import json
import time
import uuid
import subprocess
import threading
import logging
import xml.etree.ElementTree as ET
from datetime import datetime, timedelta
from enum import Enum
from dataclasses import dataclass, asdict
from typing import Optional, Dict, List, Any
from queue import Queue, Empty
from pathlib import Path
try:
from flask import Flask, request, jsonify, send_file
import psutil
except ImportError:
print("Error: Required packages not installed")
print("Install: pip3 install flask psutil")
sys.exit(1)
# Configuration
# Default artifacts directory (can be overridden by env var or command line)
DEFAULT_ARTIFACTS_DIR = os.environ.get(
"LAZY_BIRD_ARTIFACTS | [] | yusufkaraaslan/lazy-bird | scripts/godot-server.py |
"""Task executor Celery task.
This module contains the task executor that:
- Receives a TaskRun ID to execute
- Creates git worktree for isolated execution
- Runs Claude Code CLI with task instructions
- Executes tests and validates results
- Creates pull request on success
- Updates TaskRun status and stores logs
- Publishes real-time logs via Redis Pub/Sub
This is the core execution engine for Lazy-Bird tasks.
"""
from datetime import datetime, timezone
from decimal import Decimal
from pathlib import Path
from typing import Dict, Any, Optional
from uuid import UUID
from sqlalchemy import select
from sqlalchemy.orm import selectinload
from lazy_bird.core.database import get_async_db
from lazy_bird.core.logging import get_logger
from lazy_bird.models.project import Project
from lazy_bird.models.task_run import TaskRun
from lazy_bird.services.claude_service import ClaudeService
from lazy_bird.services.git_service import GitService
from lazy_bird.services.log_publisher import LogPublisher
from lazy_bird.services.pr_service import PRService
from lazy_bird.services.test_runner import TestRunner
from lazy_bird.services.webhook_service import publish_event
from lazy_bird.tasks import app
logger = get_logger(__name__)
async def _fire_webhook(
    db,
    task_run,
    event_type: str,
) -> None:
    """Fire webhook for a task state change (fire-and-forget).

    Queries active WebhookSubscription records matching the event_type,
    builds a payload from the task run, and delivers via WebhookService.
    Wrapped in try/except so failures never block task execution.

    Args:
        db: AsyncSession database session
        task_run: TaskRun model instance
        event_type: Event type string (e.g. "task.running", "task.completed")
    """
    try:
        data = {
            "task_run_id": str(task_run.id),
            "project_id": str(task_run.project_id),
            "status": task_run.status,
            "work_item_id": task_run.work_item_id,
            "work_item_title": task_run.work_item_title,
            "pr_url": getattr(task_run, "pr_url", None),
            "error_message": getattr(task_run, "error_message", None),
            # BUG FIX: `if task_run.cost_usd` treated Decimal("0") as
            # missing and reported null; test for None explicitly so a
            # genuine zero cost is preserved as 0.0.
            "cost_usd": float(task_run.cost_usd) if task_run.cost_usd is not None else None,
            "tokens_used": task_run.tokens_used,
            "branch_name": getattr(task_run, "branch_name", None),
            "duration_seconds": getattr(task_run, "duration_seconds", None),
            "retry_count": getattr(task_run, "retry_count", None),
        }
        await publish_event(
            event_type=event_type,
            data=data,
            db=db,
            project_id=task_run.project_id,
        )
    except Exception:
        # Webhooks are best-effort; log at debug and keep executing.
        logger.debug(
            f"Webhook delivery failed for event {event_type} on task {task_run.id}",
            exc_info=True,
        )
@app.task(
    name="lazy_bird.tasks.task_executor.execute_task",
    bind=True,
    max_retries=0,  # Don't auto-retry (handle retries at task level)
    soft_time_limit=3600,  # 1 hour soft limit
    time_limit=3900,  # 65 minutes hard limit
)
def execute_task(self, task_run_id: str) -> Dict[str, Any]:
    """Execute a single TaskRun.

    This is the main task execution function that:
    1. Loads TaskRun from database
    2. Creates git worktree for isolation
    3. Runs Claude Code CLI with task prompt
    4. Executes tests and collects results
    5. Creates pull request if tests pass
    6. Updates TaskRun status and logs
    7. Cleans up worktree

    Args:
        task_run_id: UUID of TaskRun to execute

    Returns:
        dict: Execution result summary
            - success: bool
            - status: final status (completed, failed, cancelled)
            - pr_url: URL of created pull request (if successful)
            - error: Error message (if failed)

    Example:
        >>> result = execute_task("123e4567-e89b-12d3-a456-426614174000")
        >>> print(result)
        {
            "success": True,
            "status": "completed",
            "pr_url": "https://github.com/user/repo/pull/42",
            "error": None
        }
    """
    import asyncio

    # Bridge Celery's synchronous worker into the async implementation;
    # asyncio.run creates a fresh event loop per task invocation.
    return asyncio.run(_execute_task_async(task_run_id))
async def _execute_task_async(task_run_id: str) -> Dict[str, Any]:
    """Async implementation of task execution.

    Executes a task run with real-time log publishing to Redis Pub/Sub.

    Full pipeline:
    1. Load TaskRun from database, update status to "running"
    2. Load related Project for config (test_command, repo_url, etc.)
    3. Create git worktree via GitService
    4. Run Claude Code CLI via ClaudeService with the task prompt
    5. Execute tests via TestRunner using the project's test_command
    6. If tests pass: commit, push, create PR via PRService
    7. If tests fail and retries remain: update error_message, set status "failed"
    8. Cleanup worktree in finally block
    9. Update TaskRun with results (cost_usd, tokens_used, duration, pr_url, status)
    10. Publish logs via LogPublisher at each step
    """
    # Result skeleton returned on every exit path.
    result = {
        "success": False,
        "status": "failed",
        "pr_url": None,
        "error": None,
    }
    # Create log publisher for this task
    log_publisher = LogPublisher(use_async=True)
    # Track worktree for cleanup in finally block
    worktree_path: Optional[Path] = None
    branch_name: Optional[str] = None
    git_service: Optional[GitService] = None
    # Get database session.
    # NOTE(review): get_async_db presumably yields a single session; the
    # early `return` statements below exit this generator loop — confirm
    # the generator closes the session on GeneratorExit.
    async for db in get_async_db():
        try:
            # ------------------------------------------------------------------
            # Step 1: Load TaskRun from database
            # ------------------------------------------------------------------
            task_run_uuid = UUID(task_run_id)
            stmt = select(TaskRun).where(TaskRun.id == task_run_uuid)
            db_result = await db.execute(stmt)
            task_run = db_result.scalar_one_or_none()
            if not task_run:
                error_msg = f"TaskRun {task_run_id} not found"
                logger.error(error_msg)
                await log_publisher.publish_log_async(
                    message=error_msg,
                    level="ERROR",
                    task_id=task_run_id,
                )
                result["error"] = error_msg
                return result
            # Publish task start log
            await log_publisher.publish_log_async(
                message=f"Starting task execution: {task_run.work_item_title}",
                level="INFO",
                task_id=str(task_run.id),
                project_id=str(task_run.project_id),
                metadata={
                    "work_item_id": task_run.work_item_id,
                    "task_type": task_run.task_type,
                    "complexity": task_run.complexity,
                },
            )
            logger.info(
                f"Starting execution of task {task_run.id}",
                extra={
                    "extra_fields": {
                        "task_run_id": str(task_run.id),
                        "project_id": str(task_run.project_id),
                        "work_item_id": task_run.work_item_id,
                    }
                },
            )
            # Update status to running
            task_run.status = "running"
            task_run.started_at = datetime.now(timezone.utc)
            await db.commit()
            # Fire webhook for running state
            await _fire_webhook(db, task_run, "task.running")
            await log_publisher.publish_log_async(
                message="Task status updated to 'running'",
                level="INFO",
                task_id=str(task_run.id),
                project_id=str(task_run.project_id),
            )
            # ------------------------------------------------------------------
            # Step 2: Load related Project for config
            # ------------------------------------------------------------------
            # Prefer the eager-loaded relationship; fall back to a query.
            project = _load_project(task_run)
            if project is None:
                project_stmt = select(Project).where(Project.id == task_run.project_id)
                project_result = await db.execute(project_stmt)
                project = project_result.scalar_one_or_none()
            # ------------------------------------------------------------------
            # Step 3: Create git worktree (GitService)
            # ------------------------------------------------------------------
            await log_publisher.publish_log_async(
                message="Creating git worktree for isolated execution...",
                level="INFO",
                task_id=str(task_run.id),
                project_id=str(task_run.project_id),
            )
            # All git/Claude/test steps are gated on having a real Project;
            # without one the task falls through and is marked completed.
            if isinstance(project, Project):
                # Extract project configuration
                project_slug = project.slug
                repo_url = project.repo_url
                project_type = project.project_type
                default_branch = project.default_branch
                test_command = project.get_effective_test_command()
                project_name = project.name
                work_item_number = _extract_issue_number(str(task_run.work_item_id))
                git_service = GitService(project_path=repo_url)
                worktree_path, branch_name = git_service.create_worktree(
                    project_id=project_slug,
                    task_id=work_item_number,
                    base_branch=default_branch,
                    force=True,
                )
                # Store git details on task_run
                task_run.branch_name = branch_name
                task_run.worktree_path = str(worktree_path)
                await db.commit()
                await log_publisher.publish_log_async(
                    message=(
                        f"Git worktree created at {worktree_path} " f"on branch {branch_name}"
                    ),
                    level="INFO",
                    task_id=str(task_run.id),
                    project_id=str(task_run.project_id),
                    metadata={
                        "worktree_path": str(worktree_path),
                        "branch_name": branch_name,
                    },
                )
            # ------------------------------------------------------------------
            # Step 4: Run Claude Code CLI (ClaudeService)
            # ------------------------------------------------------------------
            await log_publisher.publish_log_async(
                message="Running Claude Code CLI with task prompt...",
                level="INFO",
                task_id=str(task_run.id),
                project_id=str(task_run.project_id),
            )
            claude_result: Optional[Dict[str, Any]] = None
            if worktree_path and isinstance(project, Project):
                claude_service = ClaudeService()
                # Build the prompt from task details
                prompt = claude_service.construct_task_prompt(
                    project_name=project_name,
                    project_type=project_type,
                    project_id=project_slug,
                    task_title=(task_run.work_item_title or task_run.work_item_id),
                    task_body=task_run.prompt,
                    working_directory=worktree_path,
                )
                claude_result = claude_service.execute_claude(
                    prompt=prompt,
                    working_directory=worktree_path,
                    project_id=project_slug,
                    task_id=work_item_number,
                )
                # Store Claude usage metrics (committed before the success
                # check so tokens/cost are persisted even on failure).
                task_run.tokens_used = claude_result.get("tokens_used", 0)
                task_run.cost_usd = Decimal(str(claude_result.get("cost", 0.0)))
                await db.commit()
                if not claude_result.get("success"):
                    raise RuntimeError(
                        f"Claude execution failed: "
                        f"{claude_result.get('error', 'Unknown error')}"
                    )
                await log_publisher.publish_log_async(
                    message=(f"Claude execution completed " f"(tokens: {task_run.tokens_used})"),
                    level="INFO",
                    task_id=str(task_run.id),
                    project_id=str(task_run.project_id),
                    metadata={
                        "tokens_used": task_run.tokens_used,
                        "cost_usd": float(task_run.cost_usd),
                        "execution_time": claude_result.get("execution_time", 0),
                    },
                )
            # ------------------------------------------------------------------
            # Step 5: Execute tests (TestRunner)
            # ------------------------------------------------------------------
            await log_publisher.publish_log_async(
                message="Executing tests...",
                level="INFO",
                task_id=str(task_run.id),
                project_id=str(task_run.project_id),
            )
            test_result: Optional[Dict[str, Any]] = None
            if worktree_path and isinstance(project, Project) and test_command:
                test_runner = TestRunner()
                test_result = test_runner.run_tests(
                    test_command=test_command,
                    working_directory=worktree_path,
                    framework=project_type,
                    project_id=project_slug,
                    task_id=work_item_number,
                )
                task_run.tests_passed = test_result.get("success", False)
                task_run.test_output = test_result.get("output", "")
                await db.commit()
                await log_publisher.publish_log_async(
                    message=(f"Tests " f"{'passed' if test_result['success'] else 'failed'}"),
                    level="INFO" if test_result["success"] else "WARNING",
                    task_id=str(task_run.id),
                    project_id=str(task_run.project_id),
                    metadata={
                        "test_stats": test_result.get("stats", {}),
                        "test_success": test_result["success"],
                    },
                )
                # If tests fail, attempt retry then fail
                if not test_result["success"]:
                    test_result = await _handle_test_failure(
                        task_run=task_run,
                        test_result=test_result,
                        test_runner=test_runner,
                        claude_service=claude_service,
                        prompt=prompt,
                        worktree_path=worktree_path,
                        test_command=test_command,
                        project_type=project_type,
                        project_slug=project_slug,
                        work_item_number=work_item_number,
                        log_publisher=log_publisher,
                        db=db,
                    )
                    # If tests still fail after retry (or no retries)
                    if not test_result["success"]:
                        task_run.status = "failed"
                        task_run.error_message = test_runner.format_error_summary(test_result)
                        task_run.completed_at = datetime.now(timezone.utc)
                        if task_run.started_at and isinstance(task_run.started_at, datetime):
                            duration = task_run.completed_at - task_run.started_at
                            task_run.duration_seconds = int(duration.total_seconds())
                        await db.commit()
                        # Fire webhook for failed state
                        await _fire_webhook(db, task_run, "task.failed")
                        result["status"] = "failed"
                        result["error"] = task_run.error_message
                        await log_publisher.publish_log_async(
                            message="Task failed: tests did not pass",
                            level="ERROR",
                            task_id=str(task_run.id),
                            project_id=str(task_run.project_id),
                            metadata={"task_complete": True},
                        )
                        # Worktree cleanup happens in the finally block.
                        return result
            # ------------------------------------------------------------------
            # Step 6: Commit, push, and create PR (PRService)
            # ------------------------------------------------------------------
            await log_publisher.publish_log_async(
                message="Creating pull request...",
                level="INFO",
                task_id=str(task_run.id),
                project_id=str(task_run.project_id),
            )
            if worktree_path and git_service and isinstance(project, Project):
                # Commit changes
                commit_message = (
                    f"Task #{work_item_number}: "
                    f"{task_run.work_item_title or task_run.work_item_id}"
                    f"\n\nAutomated by Lazy-Bird\n"
                    f"Issue: {task_run.work_item_url or task_run.work_item_id}"
                )
                commit_sha = git_service.commit_changes(
                    worktree_path=worktree_path,
                    message=commit_message,
                    project_id=project_slug,
                )
                task_run.commit_sha = commit_sha
                # Push branch
                git_service.push_branch(
                    worktree_path=worktree_path,
                    project_id=project_slug,
                )
                # Get diff stats for PR body
                diff_stats = git_service.get_diff_stats(worktree_path)
                # Build PR body
                pr_service = PRService(working_directory=worktree_path)
                pr_body = pr_service.build_pr_body(
                    task_description=task_run.prompt,
                    implementation_summary=(
                        claude_result.get("output", "") if claude_result else ""
                    ),
                    test_results=test_result,
                    diff_stats=diff_stats,
                )
                pr_title = (
                    f"Task #{work_item_number}: "
                    f"{task_run.work_item_title or task_run.work_item_id}"
                )
                pr_result = pr_service.create_pull_request(
                    title=pr_title,
                    body=pr_body,
                    base_branch=default_branch,
                    head_branch=branch_name,
                    labels=["automated", "lazy-bird"],
                    draft=True,
                    project_id=project_slug,
                    task_id=work_item_number,
                    issue_number=work_item_number,
                )
                task_run.pr_url = pr_result.get("url")
                task_run.pr_number = pr_result.get("number")
                result["pr_url"] = task_run.pr_url
                await log_publisher.publish_log_async(
                    message=f"Pull request created: {task_run.pr_url}",
                    level="INFO",
                    task_id=str(task_run.id),
                    project_id=str(task_run.project_id),
                    metadata={
                        "pr_url": task_run.pr_url,
                        "pr_number": task_run.pr_number,
                    },
                )
            # ------------------------------------------------------------------
            # Step 7: Cleanup worktree
            # ------------------------------------------------------------------
            await log_publisher.publish_log_async(
                message="Cleaning up worktree...",
                level="INFO",
                task_id=str(task_run.id),
                project_id=str(task_run.project_id),
            )
            if worktree_path and git_service:
                try:
                    git_service.cleanup_worktree(
                        worktree_path=worktree_path,
                        branch_name=branch_name,
                    )
                except Exception as cleanup_error:
                    logger.warning(
                        f"Worktree cleanup failed: {cleanup_error}",
                        exc_info=True,
                    )
                # Clear references so finally block doesn't double-clean
                worktree_path = None
                branch_name = None
            # ------------------------------------------------------------------
            # Step 8: Mark task as completed
            # ------------------------------------------------------------------
            task_run.status = "completed"
            task_run.completed_at = datetime.now(timezone.utc)
            if task_run.started_at and isinstance(task_run.started_at, datetime):
                duration = task_run.completed_at - task_run.started_at
                task_run.duration_seconds = int(duration.total_seconds())
            await db.commit()
            # Fire webhook for completed state
            await _fire_webhook(db, task_run, "task.completed")
            result["success"] = True
            result["status"] = "completed"
            # Publish completion log
            await log_publisher.publish_log_async(
                message="Task execution completed successfully",
                level="INFO",
                task_id=str(task_run.id),
                project_id=str(task_run.project_id),
                metadata={"task_complete": True},
            )
            logger.info(
                f"Task {task_run.id} execution completed",
                extra={
                    "extra_fields": {
                        "task_run_id": str(task_run.id),
                        "status": task_run.status,
                        "pr_url": getattr(task_run, "pr_url", None),
                    }
                },
            )
            return result
        except Exception as e:
            error_msg = f"Task execution failed: {str(e)}"
            logger.error(
                error_msg,
                extra={
                    "extra_fields": {
                        "task_run_id": task_run_id,
                        "error": str(e),
                    }
                },
                exc_info=True,
            )
            # Publish error log
            await log_publisher.publish_log_async(
                message=error_msg,
                level="ERROR",
                task_id=task_run_id,
                metadata={"error_type": type(e).__name__, "error": str(e)},
            )
            result["error"] = error_msg
            # Update task status to failed.
            # NOTE(review): if the failure happened before task_run was
            # assigned (e.g. a malformed UUID), `task_run` is unbound here
            # and the resulting NameError is absorbed by this inner
            # try/except — confirm that is the intended behavior.
            try:
                task_run.status = "failed"
                task_run.error_message = str(e)
                task_run.completed_at = datetime.now(timezone.utc)
                await db.commit()
                # Fire webhook for failed state
                await _fire_webhook(db, task_run, "task.failed")
                await log_publisher.publish_log_async(
                    message="Task marked as failed",
                    level="ERROR",
                    task_id=str(task_run.id),
                    project_id=str(task_run.project_id),
                    metadata={"task_complete": True},
                )
            except Exception as update_error:
                logger.error(
                    f"Failed to update task status: {str(update_error)}",
                    exc_info=True,
                )
                await log_publisher.publish_log_async(
                    message=f"Failed to update task status: {str(update_error)}",
                    level="CRITICAL",
                    task_id=task_run_id,
                )
            return result
        finally:
            # ------------------------------------------------------------------
            # Cleanup worktree in finally block (if not already cleaned)
            # ------------------------------------------------------------------
            if worktree_path and git_service:
                try:
                    git_service.cleanup_worktree(
                        worktree_path=worktree_path,
                        branch_name=branch_name,
                    )
                except Exception as cleanup_error:
                    logger.warning(
                        f"Worktree cleanup failed: {cleanup_error}",
                        exc_info=True,
                    )
def _load_project(task_run) -> Optional[Project]:
    """Return the TaskRun's eager-loaded Project, if it is a real one.

    Surrogates (Mocks, lazy-load proxies, missing attribute) yield None so
    the caller falls back to querying the database.

    Args:
        task_run: TaskRun instance (possibly with eager-loaded project)

    Returns:
        Optional[Project]: The Project instance, or None
    """
    candidate = getattr(task_run, "project", None)
    return candidate if isinstance(candidate, Project) else None
async def _handle_test_failure(
    task_run,
    test_result: Dict[str, Any],
    test_runner: TestRunner,
    claude_service: ClaudeService,
    prompt: str,
    worktree_path: Path,
    test_command: str,
    project_type: str,
    project_slug: str,
    work_item_number: int,
    log_publisher: LogPublisher,
    db,
) -> Dict[str, Any]:
    """Handle test failure with optional retry.

    If the task has retries remaining, re-runs Claude with error context
    and re-runs tests. Mutates task_run (error_message, retry_count,
    tokens_used, cost_usd, tests_passed, test_output) and commits twice:
    once after incrementing the retry counter, once after the re-run.

    Args:
        task_run: TaskRun instance
        test_result: Initial failed test result
        test_runner: TestRunner instance
        claude_service: ClaudeService instance
        prompt: Original prompt
        worktree_path: Path to worktree
        test_command: Test command to run
        project_type: Project framework type
        project_slug: Project slug
        work_item_number: Issue number
        log_publisher: LogPublisher instance
        db: Database session

    Returns:
        Dict[str, Any]: Final test result (may be from retry); when no
        retries remain, the original failed result is returned unchanged.
    """
    # Record the failure summary regardless of whether we retry.
    error_summary = test_runner.format_error_summary(test_result)
    task_run.error_message = error_summary
    if task_run.can_retry():
        task_run.retry_count += 1
        await db.commit()
        await log_publisher.publish_log_async(
            message=(f"Tests failed, retry " f"{task_run.retry_count}/{task_run.max_retries}"),
            level="WARNING",
            task_id=str(task_run.id),
            project_id=str(task_run.project_id),
        )
        # Retry: re-run Claude with error context so it can fix the failures.
        claude_result = claude_service.execute_claude(
            prompt=prompt,
            working_directory=worktree_path,
            project_id=project_slug,
            task_id=work_item_number,
            error_context=error_summary,
        )
        # Update tokens/cost with retry usage (accumulated, not replaced).
        task_run.tokens_used = (task_run.tokens_used or 0) + claude_result.get("tokens_used", 0)
        task_run.cost_usd = Decimal(
            str(float(task_run.cost_usd or 0) + claude_result.get("cost", 0.0))
        )
        # Re-run tests
        test_result = test_runner.run_tests(
            test_command=test_command,
            working_directory=worktree_path,
            framework=project_type,
            project_id=project_slug,
            task_id=work_item_number,
        )
        task_run.tests_passed = test_result.get("success", False)
        task_run.test_output = test_result.get("output", "")
        await db.commit()
    return test_result
def _extract_issue_number(work_item_id: str) -> int:
"""Extract numeric issue number from work_item_id.
Args:
work_item_id: Work item identifier (e.g., "issue-42", "JIRA-123", "42")
Returns:
int: Extracted issue number, or 0 if not found
"""
import re
match = re.search(r"(\d+)", work_item_id)
if match:
return int(match.group(1))
return 0
# Public API: only the Celery entry point is exported; helpers stay private.
__all__ = ["execute_task"]
| """Task executor Celery task.
This module contains the task executor that:
- Receives a TaskRun ID to execute
- Creates git worktree for isolated execution
- Runs Claude Code CLI with task instructions
- Executes tests and validates results
- Creates pull request on success
- Updates TaskRun status and stores logs
- Publishes real-time logs via Redis Pub/Sub
This is the core execution engine for Lazy-Bird tasks.
"""
from datetime import datetime, timezone
from decimal import Decimal
from pathlib import Path
from typing import Dict, Any, Optional
from uuid import UUID
from sqlalchemy import select
from sqlalchemy.orm import selectinload
from lazy_bird.core.database import get_async_db
from lazy_bird.core.logging import get_logger
from lazy_bird.models.project import Project
from lazy_bird.models.task_run import TaskRun
from lazy_bird.services.claude_service import ClaudeService
from lazy_bird.services.git_service import GitService
from lazy_bird.services.log_publisher import LogPublisher
from lazy_bird.services.pr_service import PRService
from lazy_bird.services.test_runner import TestRunner
from lazy_bird.services.webhook_service import publish_event
from lazy_bird.tasks import app
logger = get_logger(__name__)
async def _fire_webhook(
db,
task_run,
event_type: str,
) -> None:
"""Fire webhook for a task state change (fire-and-forget).
Queries active WebhookSubscription records matching the event_type,
builds a payload from the task run, and delivers via WebhookService.
Wrapped in try/except so failures never block task execution.
Args:
db: AsyncSession database session
task_run: TaskRun model instance
event_type: Event type string (e.g. "task.running", | [
"# sqlalchemy/sqlalchemy:lib/sqlalchemy/sql/_selectable_constructors.py\nselect",
"# sqlalchemy/sqlalchemy:lib/sqlalchemy/orm/strategy_options.py\nselectinload",
"# yusufkaraaslan/lazy-bird:lazy_bird/core/database.py\nget_async_db",
"# yusufkaraaslan/lazy-bird:lazy_bird/core/logging.py\nget_logger",
"# yusu... | yusufkaraaslan/lazy-bird | lazy_bird/tasks/task_executor.py |
"""Queue processor Celery task.
This module contains the queue processor task that:
- Polls the database for queued TaskRun records
- Triggers task execution for each queued task
- Handles concurrency limits and task prioritization
- Updates task status and logs errors
The queue processor runs periodically (every 60 seconds by default)
via Celery Beat scheduler.
"""
from datetime import datetime, timezone
from typing import List
from sqlalchemy import select
from sqlalchemy.ext.asyncio import AsyncSession
from lazy_bird.core.config import settings
from lazy_bird.core.database import get_async_db
from lazy_bird.core.logging import get_logger
from lazy_bird.models.task_run import TaskRun
from lazy_bird.tasks import app
logger = get_logger(__name__)
@app.task(
    name="lazy_bird.tasks.queue_processor.process_queue",
    bind=True,
    max_retries=3,
    default_retry_delay=30,
)
def process_queue(self):
    """Dispatch queued TaskRuns, honoring concurrency and cost limits.

    Periodic entry point that looks up TaskRuns with status 'queued',
    checks the global MAX_PARALLEL_TASKS limit plus per-project
    concurrency and daily-cost limits, triggers execute_task for each
    eligible TaskRun, and marks it 'running'.

    Returns:
        dict: Summary counters — 'queued_count', 'triggered_count',
        'skipped_count' (with per-reason breakdowns) and 'errors'.
    """
    # Celery workers are synchronous; bridge into the async implementation.
    import asyncio

    return asyncio.run(_process_queue_async())
async def _process_queue_async() -> dict:
    """Async implementation of queue processing with intelligent task selection.

    Enhancements (Issue #105):
    - Respects per-project max_concurrent_tasks limits
    - Enforces daily cost limits per project
    - Complexity-based prioritization (simple tasks first)

    Returns:
        dict: Summary with queued/triggered/skipped counters (including
        per-reason skip counters) and a list of error strings.
    """
    from decimal import Decimal

    # Imported lazily — presumably to avoid a circular import at module
    # load time (task_executor imports from this package); TODO confirm.
    from lazy_bird.tasks.task_executor import execute_task
    from lazy_bird.models.project import Project

    summary = {
        "queued_count": 0,
        "triggered_count": 0,
        "skipped_count": 0,
        "skipped_cost_limit": 0,
        "skipped_concurrency": 0,
        "errors": [],
    }

    # Get async database session
    async for db in get_async_db():
        try:
            # Query for queued tasks (ordered by complexity ASC, created_at ASC)
            # Simple tasks first for faster throughput
            # NOTE(review): if `complexity` is stored as a plain string,
            # ascending sort is alphabetical ("complex" < "medium" <
            # "simple") — the opposite of the stated intent. Verify the
            # column type / ordering before relying on this.
            stmt = (
                select(TaskRun)
                .where(TaskRun.status == "queued")
                .order_by(
                    TaskRun.complexity.asc(),  # simple < medium < complex
                    TaskRun.created_at.asc(),  # FIFO within same complexity
                )
                .limit(100)  # Process max 100 tasks per run
            )
            result = await db.execute(stmt)
            queued_tasks: List[TaskRun] = list(result.scalars().all())
            summary["queued_count"] = len(queued_tasks)

            logger.info(
                f"Queue processor: Found {len(queued_tasks)} queued tasks",
                extra={"extra_fields": {"queued_count": len(queued_tasks)}},
            )

            if not queued_tasks:
                logger.debug("Queue processor: No queued tasks found")
                return summary

            # Check global concurrency limit
            # NOTE(review): loads full rows just to count them; a
            # select(func.count()) would be cheaper — confirm before changing.
            running_stmt = select(TaskRun).where(TaskRun.status == "running")
            running_result = await db.execute(running_stmt)
            global_running_count = len(list(running_result.scalars().all()))

            max_global_parallel = settings.MAX_PARALLEL_TASKS
            global_available = max_global_parallel - global_running_count

            logger.info(
                f"Queue processor: {global_running_count} tasks running globally, "
                f"{global_available} global slots available (max: {max_global_parallel})"
            )

            if global_available <= 0:
                logger.warning(
                    f"Queue processor: No global slots available (max: {max_global_parallel})"
                )
                summary["skipped_count"] = len(queued_tasks)
                summary["skipped_concurrency"] = len(queued_tasks)
                return summary

            # Track per-project state for intelligent selection.
            # Caches are per-invocation: running counts and daily costs are
            # queried once per project, then updated in memory.
            project_running_counts = {}
            project_daily_costs = {}
            project_configs = {}

            # Iterate through queued tasks and apply selection logic
            for task in queued_tasks:
                # Check if we've reached global limit
                if summary["triggered_count"] >= global_available:
                    summary["skipped_count"] += 1
                    summary["skipped_concurrency"] += 1
                    continue

                try:
                    # Load project config if not cached
                    if task.project_id not in project_configs:
                        project_stmt = select(Project).where(Project.id == task.project_id)
                        project_result = await db.execute(project_stmt)
                        project = project_result.scalar_one_or_none()
                        if not project:
                            logger.warning(
                                f"Project {task.project_id} not found for task {task.id}"
                            )
                            summary["skipped_count"] += 1
                            continue
                        project_configs[task.project_id] = project

                    project = project_configs[task.project_id]

                    # 1. Check per-project concurrency limit
                    if task.project_id not in project_running_counts:
                        # Query running tasks for this project
                        project_running_stmt = (
                            select(TaskRun)
                            .where(TaskRun.project_id == task.project_id)
                            .where(TaskRun.status == "running")
                        )
                        project_running_result = await db.execute(project_running_stmt)
                        project_running_counts[task.project_id] = len(
                            list(project_running_result.scalars().all())
                        )

                    project_running = project_running_counts[task.project_id]
                    if project_running >= project.max_concurrent_tasks:
                        logger.info(
                            f"Skipping task {task.id}: project {task.project_id} "
                            f"at concurrency limit ({project_running}/{project.max_concurrent_tasks})"
                        )
                        summary["skipped_count"] += 1
                        summary["skipped_concurrency"] += 1
                        continue

                    # 2. Check daily cost limit
                    if task.project_id not in project_daily_costs:
                        # Query today's cost for this project
                        from sqlalchemy import func

                        today_start = datetime.now(timezone.utc).replace(
                            hour=0, minute=0, second=0, microsecond=0
                        )
                        cost_stmt = (
                            select(func.coalesce(func.sum(TaskRun.cost_usd), Decimal("0.00")))
                            .where(TaskRun.project_id == task.project_id)
                            .where(TaskRun.started_at >= today_start)
                        )
                        cost_result = await db.execute(cost_stmt)
                        project_daily_costs[task.project_id] = cost_result.scalar()

                    daily_cost = project_daily_costs[task.project_id]
                    if daily_cost >= project.daily_cost_limit_usd:
                        logger.warning(
                            f"Skipping task {task.id}: project {task.project_id} "
                            f"hit daily cost limit (${daily_cost}/${project.daily_cost_limit_usd})"
                        )
                        summary["skipped_count"] += 1
                        summary["skipped_cost_limit"] += 1
                        continue

                    # All checks passed - trigger task
                    # NOTE(review): the Celery message is sent BEFORE the
                    # status flip below is committed — a fast worker could
                    # observe status 'queued'; presumably execute_task
                    # tolerates that. TODO confirm.
                    execute_task.delay(str(task.id))

                    # Update status to 'running' immediately to prevent double-processing
                    task.status = "running"
                    task.started_at = datetime.now(timezone.utc)
                    await db.commit()

                    # Update counters
                    summary["triggered_count"] += 1
                    project_running_counts[task.project_id] = project_running + 1

                    logger.info(
                        f"Queue processor: Triggered task {task.id}",
                        extra={
                            "extra_fields": {
                                "task_run_id": str(task.id),
                                "project_id": str(task.project_id),
                                "complexity": task.complexity,
                                "daily_cost": float(daily_cost),
                                "cost_limit": float(project.daily_cost_limit_usd),
                            }
                        },
                    )

                except Exception as e:
                    # Per-task failures are recorded but never abort the loop.
                    error_msg = f"Failed to process task {task.id}: {str(e)}"
                    logger.error(
                        error_msg,
                        extra={
                            "extra_fields": {
                                "task_run_id": str(task.id),
                                "error": str(e),
                            }
                        },
                        exc_info=True,
                    )
                    summary["errors"].append(error_msg)

            logger.info(
                f"Queue processor: Summary - "
                f"triggered: {summary['triggered_count']}, "
                f"skipped: {summary['skipped_count']} "
                f"(cost_limit: {summary['skipped_cost_limit']}, "
                f"concurrency: {summary['skipped_concurrency']}), "
                f"errors: {len(summary['errors'])}"
            )
            return summary

        except Exception as e:
            logger.error(
                f"Queue processor error: {str(e)}",
                extra={"extra_fields": {"error": str(e)}},
                exc_info=True,
            )
            summary["errors"].append(str(e))
            return summary
@app.task(
    name="lazy_bird.tasks.queue_processor.requeue_stale_tasks",
    bind=True,
)
def requeue_stale_tasks(self):
    """Return long-running 'running' tasks to the queue.

    TaskRuns stuck in 'running' longer than TASK_TIMEOUT_SECONDS are
    assumed orphaned (crashed worker, killed process, lost status
    update) and are flipped back to 'queued' for another attempt.

    Returns:
        dict: Summary with 'stale_count', 'requeued_count' and 'errors'.
    """
    # Celery workers are synchronous; bridge into the async implementation.
    import asyncio

    return asyncio.run(_requeue_stale_tasks_async())
async def _requeue_stale_tasks_async() -> dict:
    """Async implementation of stale task requeuing.

    Finds TaskRuns stuck in 'running' longer than
    settings.TASK_TIMEOUT_SECONDS and flips them back to 'queued' so the
    queue processor can pick them up again.

    Returns:
        dict: Summary with 'stale_count', 'requeued_count' and 'errors'.
    """
    summary = {
        "stale_count": 0,
        "requeued_count": 0,
        "errors": [],
    }

    # Get async database session
    async for db in get_async_db():
        try:
            # Tasks that started before this instant are considered stale.
            timeout_threshold = datetime.now(timezone.utc) - timedelta(
                seconds=settings.TASK_TIMEOUT_SECONDS
            )

            # Query for stale running tasks
            stmt = (
                select(TaskRun)
                .where(TaskRun.status == "running")
                .where(TaskRun.started_at < timeout_threshold)
                .limit(50)  # Limit to prevent overwhelming system
            )
            result = await db.execute(stmt)
            stale_tasks: List[TaskRun] = list(result.scalars().all())
            summary["stale_count"] = len(stale_tasks)

            if not stale_tasks:
                logger.debug("Stale task checker: No stale tasks found")
                return summary

            logger.warning(
                f"Stale task checker: Found {len(stale_tasks)} stale tasks",
                extra={"extra_fields": {"stale_count": len(stale_tasks)}},
            )

            # Requeue stale tasks
            for task in stale_tasks:
                try:
                    # Bug fix: capture the original start time BEFORE
                    # clearing it — previously started_at was set to None
                    # first, so the error message and log always showed None.
                    original_started_at = task.started_at

                    # Update status back to queued
                    task.status = "queued"
                    task.started_at = None
                    # Bug fix: write error_message (the persisted TaskRun
                    # column used elsewhere in this codebase); `task.error`
                    # was a plain attribute that was never persisted.
                    task.error_message = (
                        "Task timed out and was automatically requeued. "
                        f"Original start time: {original_started_at}"
                    )
                    await db.commit()

                    summary["requeued_count"] += 1
                    logger.warning(
                        f"Requeued stale task {task.id}",
                        extra={
                            "extra_fields": {
                                "task_run_id": str(task.id),
                                "started_at": (
                                    original_started_at.isoformat()
                                    if original_started_at
                                    else None
                                ),
                            }
                        },
                    )

                except Exception as e:
                    error_msg = f"Failed to requeue stale task {task.id}: {str(e)}"
                    logger.error(error_msg, exc_info=True)
                    summary["errors"].append(error_msg)

            logger.info(
                f"Stale task checker: Requeued {summary['requeued_count']} of {summary['stale_count']} stale tasks"
            )
            return summary

        except Exception as e:
            logger.error(f"Stale task checker error: {str(e)}", exc_info=True)
            summary["errors"].append(str(e))
            return summary
# Import timedelta for stale task requeuing
from datetime import timedelta
| """Queue processor Celery task.
This module contains the queue processor task that:
- Polls the database for queued TaskRun records
- Triggers task execution for each queued task
- Handles concurrency limits and task prioritization
- Updates task status and logs errors
The queue processor runs periodically (every 60 seconds by default)
via Celery Beat scheduler.
"""
from datetime import datetime, timezone
from typing import List
from sqlalchemy import select
from sqlalchemy.ext.asyncio import AsyncSession
from lazy_bird.core.config import settings
from lazy_bird.core.database import get_async_db
from lazy_bird.core.logging import get_logger
from lazy_bird.models.task_run import TaskRun
from lazy_bird.tasks import app
logger = get_logger(__name__)
@app.task(
name="lazy_bird.tasks.queue_processor.process_queue",
bind=True,
max_retries=3,
default_retry_delay=30,
)
def process_queue(self):
"""Process queued tasks and trigger execution.
This task runs periodically to:
1. Query database for TaskRun records with status='queued'
2. Check concurrency limits (MAX_PARALLEL_TASKS)
3. Trigger execute_task for each queued task
4. Update task status to 'running'
5. Handle errors and log results
Returns:
dict: Processing summary with counts
- queued_count: Number of tasks found in queue
- triggered_count: Number of tasks triggered
- skipped_count: Number of tasks skipped (concurrency limit)
- errors: List of errors encountered
Example:
>>> result = process_queue()
| [
"# sqlalchemy/sqlalchemy:lib/sqlalchemy/sql/_selectable_constructors.py\nselect",
"# sqlalchemy/sqlalchemy:lib/sqlalchemy/ext/asyncio/session.py\nAsyncSession",
"# yusufkaraaslan/lazy-bird:lazy_bird/core/database.py\nget_async_db",
"# yusufkaraaslan/lazy-bird:lazy_bird/core/logging.py\nget_logger",
"# yusuf... | yusufkaraaslan/lazy-bird | lazy_bird/tasks/queue_processor.py |
"""Issue watcher Celery task.
This module contains the issue watcher task that:
- Polls GitHub/GitLab APIs for issues labeled 'ready'
- Creates TaskRun records for new issues
- Updates issue labels (removes 'ready', adds 'processing')
- Runs periodically via Celery Beat scheduler (every 60 seconds)
"""
import re
import uuid
from typing import Any, Dict, List, Optional, Tuple
import httpx
from sqlalchemy import select
from sqlalchemy.ext.asyncio import AsyncSession
from lazy_bird.core.config import settings
from lazy_bird.core.database import get_async_db
from lazy_bird.core.logging import get_logger
from lazy_bird.models.project import Project
from lazy_bird.models.task_run import TaskRun
from lazy_bird.tasks import app
logger = get_logger(__name__)
def _parse_github_repo(repo_url: str) -> Optional[Tuple[str, str]]:
"""Parse owner and repo from a GitHub repository URL.
Supports formats:
- https://github.com/owner/repo
- https://github.com/owner/repo.git
- git@github.com:owner/repo.git
Args:
repo_url: Git repository URL
Returns:
Tuple of (owner, repo) or None if not parseable
"""
# HTTPS format
match = re.match(r"https?://github\.com/([^/]+)/([^/.]+)(?:\.git)?", repo_url)
if match:
return match.group(1), match.group(2)
# SSH format
match = re.match(r"git@github\.com:([^/]+)/([^/.]+)(?:\.git)?", repo_url)
if match:
return match.group(1), match.group(2)
return None
def _parse_gitlab_repo(repo_url: str) -> Optional[str]:
"""Parse project path from a GitLab repository URL.
Supports formats:
- https://gitlab.com/owner/repo
- https://gitlab.com/group/subgroup/repo
- https://gitlab.com/owner/repo.git
Args:
repo_url: Git repository URL
Returns:
URL-encoded project path or None if not parseable
"""
match = re.match(r"https?://[^/]+/(.+?)(?:\.git)?$", repo_url)
if match:
return match.group(1).replace("/", "%2F")
return None
def _detect_platform(project: Project) -> Optional[str]:
"""Detect the source platform for a project.
Uses source_platform field first, then falls back to repo_url inspection.
Args:
project: Project model instance
Returns:
Platform string ('github' or 'gitlab') or None
"""
if project.source_platform:
return project.source_platform.lower()
if project.repo_url:
if "github.com" in project.repo_url:
return "github"
if "gitlab" in project.repo_url:
return "gitlab"
return None
async def _fetch_github_issues(
    client: httpx.AsyncClient,
    owner: str,
    repo: str,
    token: str,
) -> List[Dict[str, Any]]:
    """Return open GitHub issues carrying the 'ready' label.

    Args:
        client: Shared httpx async client
        owner: Repository owner/organization
        repo: Repository name
        token: GitHub API token

    Returns:
        List of raw issue dicts as returned by the GitHub REST API

    Raises:
        httpx.HTTPStatusError: On a non-2xx API response.
    """
    response = await client.get(
        f"https://api.github.com/repos/{owner}/{repo}/issues",
        headers={
            "Authorization": f"token {token}",
            "Accept": "application/vnd.github.v3+json",
        },
        params={"labels": "ready", "state": "open"},
    )
    response.raise_for_status()
    return response.json()
async def _update_github_labels(
    client: httpx.AsyncClient,
    owner: str,
    repo: str,
    issue_number: int,
    token: str,
) -> None:
    """Swap the 'ready' label for 'processing' on a GitHub issue.

    Both label operations are best-effort: HTTP status failures are
    logged as warnings and never raised to the caller.

    Args:
        client: Shared httpx async client
        owner: Repository owner/organization
        repo: Repository name
        issue_number: Issue number
        token: GitHub API token
    """
    auth_headers = {
        "Authorization": f"token {token}",
        "Accept": "application/vnd.github.v3+json",
    }
    issue_base = f"https://api.github.com/repos/{owner}/{repo}/issues/{issue_number}"

    # Drop the 'ready' label first.
    try:
        await client.delete(f"{issue_base}/labels/ready", headers=auth_headers)
    except httpx.HTTPStatusError:
        logger.warning(f"Failed to remove 'ready' label from {owner}/{repo}#{issue_number}")

    # Then tag the issue as 'processing'.
    try:
        await client.post(
            f"{issue_base}/labels",
            headers=auth_headers,
            json={"labels": ["processing"]},
        )
    except httpx.HTTPStatusError:
        logger.warning(f"Failed to add 'processing' label to {owner}/{repo}#{issue_number}")
async def _fetch_gitlab_issues(
    client: httpx.AsyncClient,
    project_path: str,
    token: str,
) -> List[Dict[str, Any]]:
    """Return open GitLab issues carrying the 'ready' label.

    Args:
        client: Shared httpx async client
        project_path: URL-encoded project path (e.g. "group%2Frepo")
        token: GitLab API token

    Returns:
        List of raw issue dicts as returned by the GitLab API

    Raises:
        httpx.HTTPStatusError: On a non-2xx API response.
    """
    response = await client.get(
        f"https://gitlab.com/api/v4/projects/{project_path}/issues",
        headers={"PRIVATE-TOKEN": token},
        params={"labels": "ready", "state": "opened"},
    )
    response.raise_for_status()
    return response.json()
async def _update_gitlab_labels(
    client: httpx.AsyncClient,
    project_path: str,
    issue_iid: int,
    token: str,
) -> None:
    """Swap the 'ready' label for 'processing' on a GitLab issue.

    Best-effort: HTTP status failures are logged as warnings, never
    raised to the caller. Unlike GitHub, GitLab applies both label
    changes in a single PUT.

    Args:
        client: Shared httpx async client
        project_path: URL-encoded project path
        issue_iid: Issue IID (internal ID within the project)
        token: GitLab API token
    """
    try:
        await client.put(
            f"https://gitlab.com/api/v4/projects/{project_path}/issues/{issue_iid}",
            headers={"PRIVATE-TOKEN": token},
            json={
                "remove_labels": "ready",
                "add_labels": "processing",
            },
        )
    except httpx.HTTPStatusError:
        logger.warning(f"Failed to update labels on GitLab issue {project_path}#{issue_iid}")
@app.task(
    name="lazy_bird.tasks.issue_watcher.watch_issues",
    bind=True,
    max_retries=3,
    default_retry_delay=30,
)
def watch_issues(self):
    """Poll source platforms for 'ready' issues and queue TaskRuns.

    Periodic entry point that, for every active project with automation
    enabled, fetches issues labeled 'ready' from GitHub/GitLab, creates
    a TaskRun for each issue that does not already have one, and swaps
    the issue's 'ready' label for 'processing'.

    Returns:
        dict: Processing summary with counts
    """
    # Celery workers are synchronous; bridge into the async implementation.
    import asyncio

    return asyncio.run(_watch_issues_async())
async def _watch_issues_async() -> dict:
    """Async implementation of issue watching.

    Iterates every active (non-deleted) project with automation enabled
    and delegates per-project issue processing to
    _process_project_issues, reusing one shared httpx client for all
    API calls.

    Returns:
        dict: Counters for projects checked and issues found / created /
        skipped, plus a list of error strings. Per-project failures are
        isolated: one failing project never blocks the others.
    """
    summary = {
        "projects_checked": 0,
        "issues_found": 0,
        "task_runs_created": 0,
        "issues_skipped": 0,
        "errors": [],
    }

    async for db in get_async_db():
        try:
            # Query active projects with automation enabled
            stmt = (
                select(Project)
                .where(Project.automation_enabled.is_(True))
                .where(Project.deleted_at.is_(None))
            )
            result = await db.execute(stmt)
            projects: List[Project] = list(result.scalars().all())

            logger.info(
                f"Issue watcher: Found {len(projects)} active projects",
                extra={"extra_fields": {"project_count": len(projects)}},
            )

            if not projects:
                logger.debug("Issue watcher: No active projects found")
                return summary

            # One client (with a 30s timeout) shared across all projects.
            async with httpx.AsyncClient(timeout=30.0) as client:
                for project in projects:
                    try:
                        await _process_project_issues(client, db, project, summary)
                        summary["projects_checked"] += 1
                    except Exception as e:
                        # A failing project must not block the others.
                        error_msg = (
                            f"Failed to process issues for project "
                            f"{project.id} ({project.name}): {str(e)}"
                        )
                        logger.error(
                            error_msg,
                            extra={
                                "extra_fields": {
                                    "project_id": str(project.id),
                                    "error": str(e),
                                }
                            },
                            exc_info=True,
                        )
                        summary["errors"].append(error_msg)

            logger.info(
                f"Issue watcher: Summary - "
                f"projects: {summary['projects_checked']}, "
                f"issues found: {summary['issues_found']}, "
                f"created: {summary['task_runs_created']}, "
                f"skipped: {summary['issues_skipped']}, "
                f"errors: {len(summary['errors'])}"
            )
            return summary

        except Exception as e:
            logger.error(
                f"Issue watcher error: {str(e)}",
                extra={"extra_fields": {"error": str(e)}},
                exc_info=True,
            )
            summary["errors"].append(str(e))
            return summary
async def _process_project_issues(
    client: httpx.AsyncClient,
    db: AsyncSession,
    project: Project,
    summary: dict,
) -> None:
    """Process 'ready' issues for a single project.

    Detects the project's platform, fetches open issues labeled
    'ready', creates a TaskRun for each issue that does not already
    have one, and swaps the issue's labels from 'ready' to
    'processing'. Mutates *summary* in place; returns nothing.

    Args:
        client: httpx async client
        db: Async database session
        project: Project to check for issues
        summary: Summary dict to update counts
    """
    platform = _detect_platform(project)

    if platform == "github":
        token = settings.GITHUB_TOKEN
        if not token:
            logger.warning(f"No GITHUB_TOKEN configured, skipping project {project.name}")
            return

        parsed = _parse_github_repo(project.repo_url)
        if not parsed:
            logger.warning(f"Could not parse GitHub repo from URL: {project.repo_url}")
            return
        owner, repo = parsed

        issues = await _fetch_github_issues(client, owner, repo, token)
        for issue in issues:
            summary["issues_found"] += 1
            issue_number = issue["number"]
            work_item_id = str(issue_number)

            # Check if TaskRun already exists for this issue.
            # NOTE(review): scalar_one_or_none() raises if duplicate
            # TaskRuns exist for the same (project_id, work_item_id) —
            # confirm a uniqueness constraint covers that pair.
            existing_stmt = (
                select(TaskRun)
                .where(TaskRun.project_id == project.id)
                .where(TaskRun.work_item_id == work_item_id)
            )
            existing_result = await db.execute(existing_stmt)
            existing = existing_result.scalar_one_or_none()
            if existing:
                summary["issues_skipped"] += 1
                logger.debug(f"Issue #{issue_number} already has TaskRun, skipping")
                continue

            # Create new TaskRun
            task_run = TaskRun(
                id=uuid.uuid4(),
                project_id=project.id,
                work_item_id=work_item_id,
                work_item_url=issue.get("html_url"),
                work_item_title=issue.get("title"),
                work_item_description=issue.get("body"),
                prompt=issue.get("body") or issue.get("title", ""),
                status="queued",
                task_type="feature",
            )
            db.add(task_run)
            # Commit before touching labels: if the label update fails,
            # the TaskRun already exists, so the next poll skips this
            # issue (though its 'ready' label may linger).
            await db.commit()

            # Update issue labels
            await _update_github_labels(client, owner, repo, issue_number, token)

            summary["task_runs_created"] += 1
            logger.info(
                f"Created TaskRun for GitHub issue #{issue_number} " f"in project {project.name}",
                extra={
                    "extra_fields": {
                        "project_id": str(project.id),
                        "issue_number": issue_number,
                        "task_run_id": str(task_run.id),
                    }
                },
            )

    elif platform == "gitlab":
        token = settings.GITLAB_TOKEN
        if not token:
            logger.warning(f"No GITLAB_TOKEN configured, skipping project {project.name}")
            return

        project_path = _parse_gitlab_repo(project.repo_url)
        if not project_path:
            logger.warning(f"Could not parse GitLab project from URL: {project.repo_url}")
            return

        # Mirrors the GitHub branch above, using GitLab field names
        # (iid / web_url / description).
        issues = await _fetch_gitlab_issues(client, project_path, token)
        for issue in issues:
            summary["issues_found"] += 1
            issue_iid = issue["iid"]
            work_item_id = str(issue_iid)

            # Check if TaskRun already exists
            existing_stmt = (
                select(TaskRun)
                .where(TaskRun.project_id == project.id)
                .where(TaskRun.work_item_id == work_item_id)
            )
            existing_result = await db.execute(existing_stmt)
            existing = existing_result.scalar_one_or_none()
            if existing:
                summary["issues_skipped"] += 1
                logger.debug(f"GitLab issue !{issue_iid} already has TaskRun, skipping")
                continue

            # Create new TaskRun
            task_run = TaskRun(
                id=uuid.uuid4(),
                project_id=project.id,
                work_item_id=work_item_id,
                work_item_url=issue.get("web_url"),
                work_item_title=issue.get("title"),
                work_item_description=issue.get("description"),
                prompt=issue.get("description") or issue.get("title", ""),
                status="queued",
                task_type="feature",
            )
            db.add(task_run)
            await db.commit()

            # Update issue labels
            await _update_gitlab_labels(client, project_path, issue_iid, token)

            summary["task_runs_created"] += 1
            logger.info(
                f"Created TaskRun for GitLab issue !{issue_iid} " f"in project {project.name}",
                extra={
                    "extra_fields": {
                        "project_id": str(project.id),
                        "issue_iid": issue_iid,
                        "task_run_id": str(task_run.id),
                    }
                },
            )

    else:
        logger.warning(
            f"Unknown platform for project {project.name} "
            f"(source_platform={project.source_platform}, "
            f"repo_url={project.repo_url})"
        )
| """Issue watcher Celery task.
This module contains the issue watcher task that:
- Polls GitHub/GitLab APIs for issues labeled 'ready'
- Creates TaskRun records for new issues
- Updates issue labels (removes 'ready', adds 'processing')
- Runs periodically via Celery Beat scheduler (every 60 seconds)
"""
import re
import uuid
from typing import Any, Dict, List, Optional, Tuple
import httpx
from sqlalchemy import select
from sqlalchemy.ext.asyncio import AsyncSession
from lazy_bird.core.config import settings
from lazy_bird.core.database import get_async_db
from lazy_bird.core.logging import get_logger
from lazy_bird.models.project import Project
from lazy_bird.models.task_run import TaskRun
from lazy_bird.tasks import app
logger = get_logger(__name__)
def _parse_github_repo(repo_url: str) -> Optional[Tuple[str, str]]:
"""Parse owner and repo from a GitHub repository URL.
Supports formats:
- https://github.com/owner/repo
- https://github.com/owner/repo.git
- git@github.com:owner/repo.git
Args:
repo_url: Git repository URL
Returns:
Tuple of (owner, repo) or None if not parseable
"""
# HTTPS format
match = re.match(r"https?://github\.com/([^/]+)/([^/.]+)(?:\.git)?", repo_url)
if match:
return match.group(1), match.group(2)
# SSH format
match = re.match(r"git@github\.com:([^/]+)/([^/.]+)(?:\.git)?", repo_url)
if match:
| [
"# sqlalchemy/sqlalchemy:lib/sqlalchemy/sql/_selectable_constructors.py\nselect",
"# sqlalchemy/sqlalchemy:lib/sqlalchemy/ext/asyncio/session.py\nAsyncSession",
"# yusufkaraaslan/lazy-bird:lazy_bird/core/database.py\nget_async_db",
"# yusufkaraaslan/lazy-bird:lazy_bird/core/logging.py\nget_logger",
"# yusuf... | yusufkaraaslan/lazy-bird | lazy_bird/tasks/issue_watcher.py |
"""Celery configuration for Lazy-Bird.
This module contains all Celery configuration settings:
- Broker and result backend URLs
- Task serialization and result formats
- Task routing and queue configuration
- Worker settings and limits
- Beat schedule for periodic tasks
"""
from kombu import Queue
from lazy_bird.core.config import settings
# -----------------------------------------------------------------------------
# BROKER SETTINGS
# -----------------------------------------------------------------------------

# Redis broker URL
broker_url = settings.CELERY_BROKER_URL

# Result backend URL (separate Redis DB for results)
result_backend = settings.CELERY_RESULT_BACKEND

# Connection settings
broker_connection_retry_on_startup = True
broker_connection_retry = True
broker_connection_max_retries = 10

# -----------------------------------------------------------------------------
# TASK SETTINGS
# -----------------------------------------------------------------------------

# Task serialization
task_serializer = "json"
accept_content = ["json"]
result_serializer = "json"

# Task result settings
result_expires = 3600  # Results expire after 1 hour
result_extended = True  # Store additional metadata

# Task execution settings
task_acks_late = True  # Acknowledge tasks after execution (safer)
task_reject_on_worker_lost = True  # Reject tasks if worker dies
worker_prefetch_multiplier = 1  # Only prefetch 1 task at a time (fair distribution)

# Task time limits (prevent runaway tasks)
task_soft_time_limit = 3600  # Soft limit: 1 hour (raises SoftTimeLimitExceeded)
task_time_limit = 3900  # Hard limit: 65 minutes (kills task)

# Task retry settings
# NOTE(review): `task_autoretry_for` and `task_retry_kwargs` look like
# per-task decorator options (`autoretry_for=`, `retry_kwargs=`) rather
# than documented app-level settings — verify Celery actually honors
# them as global configuration.
task_autoretry_for = (Exception,)  # Auto-retry on any exception
task_retry_kwargs = {"max_retries": 3}  # Max 3 retries
task_default_retry_delay = 60  # Wait 60 seconds between retries

# Always eager in tests (execute immediately, no async)
task_always_eager = settings.CELERY_TASK_ALWAYS_EAGER
task_eager_propagates = True  # Propagate exceptions in eager mode

# -----------------------------------------------------------------------------
# QUEUE CONFIGURATION
# -----------------------------------------------------------------------------

# Define task queues with priorities
# NOTE(review): "x-max-priority" is a RabbitMQ queue argument; the Redis
# transport ignores queue_arguments — confirm whether priority support
# is expected with the configured broker.
task_queues = (
    Queue(
        "default",
        routing_key="task.#",
        queue_arguments={"x-max-priority": 10},
    ),
    Queue(
        "high_priority",
        routing_key="high.#",
        queue_arguments={"x-max-priority": 10},
    ),
    Queue(
        "low_priority",
        routing_key="low.#",
        queue_arguments={"x-max-priority": 10},
    ),
)

# Default queue for tasks
task_default_queue = "default"
task_default_exchange = "tasks"
task_default_exchange_type = "topic"
task_default_routing_key = "task.default"

# Task routing (map tasks to specific queues)
task_routes = {
    # High priority: Critical workflow tasks
    "lazy_bird.tasks.task_executor.execute_task": {
        "queue": "high_priority",
        "routing_key": "high.execute",
    },
    "lazy_bird.tasks.webhook_delivery.deliver_webhook_task": {
        "queue": "high_priority",
        "routing_key": "high.webhook",
    },
    # Default priority: Regular tasks
    "lazy_bird.tasks.issue_watcher.watch_issues": {
        "queue": "default",
        "routing_key": "task.watch",
    },
    "lazy_bird.tasks.queue_processor.process_queue": {
        "queue": "default",
        "routing_key": "task.queue",
    },
    # Low priority: Cleanup and maintenance
    "lazy_bird.tasks.cleanup.cleanup_old_worktrees": {
        "queue": "low_priority",
        "routing_key": "low.cleanup",
    },
    "lazy_bird.tasks.cleanup.cleanup_expired_results": {
        "queue": "low_priority",
        "routing_key": "low.cleanup",
    },
}

# -----------------------------------------------------------------------------
# WORKER SETTINGS
# -----------------------------------------------------------------------------

# Worker concurrency (number of worker processes)
worker_concurrency = settings.MAX_PARALLEL_TASKS  # From config (default: 3)

# Worker pool type
worker_pool = "prefork"  # Use multiprocessing pool (can also be 'threads', 'solo')

# Worker log settings
worker_log_format = "[%(asctime)s: %(levelname)s/%(processName)s] %(message)s"
worker_task_log_format = (
    "[%(asctime)s: %(levelname)s/%(processName)s] [%(task_name)s(%(task_id)s)] %(message)s"
)

# Worker hijack root logger (integrate with lazy_bird logging)
worker_hijack_root_logger = False

# Worker send events (enable monitoring, e.g. via Flower)
worker_send_task_events = True
task_send_sent_event = True

# -----------------------------------------------------------------------------
# BEAT SCHEDULE (Periodic Tasks)
# -----------------------------------------------------------------------------

# Schedule for periodic tasks (interval in seconds)
beat_schedule = {
    "watch-issues-every-60s": {
        "task": "lazy_bird.tasks.issue_watcher.watch_issues",
        "schedule": 60.0,  # Run every 60 seconds
        "options": {
            "queue": "default",
            "priority": 5,
        },
    },
    "process-queue-every-60-seconds": {
        "task": "lazy_bird.tasks.queue_processor.process_queue",
        "schedule": 60.0,  # Run every 60 seconds
        "options": {
            "queue": "default",
            "priority": 5,
        },
    },
    "cleanup-old-worktrees-daily": {
        "task": "lazy_bird.tasks.cleanup.cleanup_old_worktrees",
        "schedule": 86400.0,  # Run daily (24 hours)
        "options": {
            "queue": "low_priority",
            "priority": 1,
        },
    },
    "cleanup-expired-results-hourly": {
        "task": "lazy_bird.tasks.cleanup.cleanup_expired_results",
        "schedule": 3600.0,  # Run every hour
        "options": {
            "queue": "low_priority",
            "priority": 1,
        },
    },
}

# Timezone for beat schedule
timezone = "UTC"
enable_utc = True

# -----------------------------------------------------------------------------
# MONITORING & DEBUGGING
# -----------------------------------------------------------------------------

# Task track started (track when tasks start, not just finish)
task_track_started = True

# Task ignore result (don't store results for these tasks)
task_ignore_result = False

# Worker disable rate limits (for development)
worker_disable_rate_limits = settings.is_development

# Task store errors even if ignored
task_store_errors_even_if_ignored = True
| """Celery configuration for Lazy-Bird.
This module contains all Celery configuration settings:
- Broker and result backend URLs
- Task serialization and result formats
- Task routing and queue configuration
- Worker settings and limits
- Beat schedule for periodic tasks
"""
from kombu import Queue
from lazy_bird.core.config import settings
# -----------------------------------------------------------------------------
# BROKER SETTINGS
# -----------------------------------------------------------------------------
# Redis broker URL
broker_url = settings.CELERY_BROKER_URL
# Result backend URL (separate Redis DB for results)
result_backend = settings.CELERY_RESULT_BACKEND
# Connection settings
broker_connection_retry_on_startup = True
broker_connection_retry = True
broker_connection_max_retries = 10
# -----------------------------------------------------------------------------
# TASK SETTINGS
# -----------------------------------------------------------------------------
# Task serialization
task_serializer = "json"
accept_content = ["json"]
result_serializer = "json"
# Task result settings
result_expires = 3600 # Results expire after 1 hour
result_extended = True # Store additional metadata
# Task execution settings
task_acks_late = True # Acknowledge tasks after execution (safer)
task_reject_on_worker_lost = True # Reject tasks if worker dies
worker_prefetch_multiplier = 1 # Only prefetch 1 task at a time (fair distribution)
# Task time limits (prevent runaway tasks)
task_soft_time_limit = 3600 # Soft limit: 1 hour (raises exception)
task_time_limit = 3900 # Hard limit: 65 minutes (kills task)
# Task retry settings
task_autoretry_for = (Exception,) # Auto-retry on any exception
task_retry_kwargs = {"max_retries": 3} # Max 3 retries
task_default_retry_delay = 60 # Wait 60 seconds between retries
# Always eager in tests (execute immediately, no async)
task_always_eager = settings.CELERY_TASK_ALWAYS_E | [] | yusufkaraaslan/lazy-bird | lazy_bird/tasks/celeryconfig.py |
"""Celery application for background task processing.
This module initializes the Celery application for Lazy-Bird's background tasks:
- Queue processing (polling for queued TaskRuns)
- Task execution (running Claude Code, git operations, tests)
- Webhook delivery with retry logic
- Periodic cleanup tasks
Usage:
# Start Celery worker
celery -A lazy_bird.tasks worker --loglevel=info
# Start Celery beat (for periodic tasks)
celery -A lazy_bird.tasks beat --loglevel=info
# Start both worker and beat
celery -A lazy_bird.tasks worker --beat --loglevel=info
"""
from celery import Celery
from lazy_bird.core.config import settings
# Create Celery app instance
app = Celery("lazy_bird")
# Load configuration from celeryconfig module
app.config_from_object("lazy_bird.tasks.celeryconfig")
# Auto-discover tasks in these modules
app.autodiscover_tasks(
[
"lazy_bird.tasks",
]
)
@app.task(bind=True)
def debug_task(self):
    """Smoke-test task that echoes request metadata to verify Celery wiring."""
    print(f"Request: {self.request!r}")
    # Celery's app object may not expose a VERSION attribute; fall back gracefully.
    version = getattr(self.app, "VERSION", "unknown")
    return {
        "status": "ok",
        "celery_version": version,
        "task_id": self.request.id,
    }


__all__ = ["app", "debug_task"]
| """Celery application for background task processing.
This module initializes the Celery application for Lazy-Bird's background tasks:
- Queue processing (polling for queued TaskRuns)
- Task execution (running Claude Code, git operations, tests)
- Webhook delivery with retry logic
- Periodic cleanup tasks
Usage:
# Start Celery worker
celery -A lazy_bird.tasks worker --loglevel=info
# Start Celery beat (for periodic tasks)
celery -A lazy_bird.tasks beat --loglevel=info
# Start both worker and beat
celery -A lazy_bird.tasks worker --beat --loglevel=info
"""
from celery import Celery
from lazy_bird.core.config import settings
# Create Celery app instance
app = Celery("lazy_bird")
# Load configuration from celeryconfig module
app.config_from_object("lazy_bird.tasks.celeryconfig")
# Auto-discover tasks in these modules
app.autodiscover_tasks(
[
"lazy_bird.tasks",
]
)
@app.task(bind=True)
def debug_task(self):
"""Debug task to test Celery is working."""
print(f"Request: {self.request!r}")
return {
"status": "ok",
"celery_version": self.app.VERSION if hasattr(self.app, "VERSION") else "unknown",
"task_id": self.request.id,
}
__all__ = ["app", "debug_task"]
| [
"# celery/celery:celery/app/base.py\nCelery"
] | yusufkaraaslan/lazy-bird | lazy_bird/tasks/__init__.py |
"""Webhook delivery service with HMAC signatures and retry logic.
This module provides webhook event publishing with:
- HMAC-SHA256 signature generation for security
- Automatic retry with exponential backoff
- Failure tracking and auto-disable
- Async HTTP delivery
"""
import asyncio
import hashlib
import hmac
import json
from datetime import datetime, timezone
from typing import Any, Dict, List, Optional
from uuid import UUID
import httpx
from sqlalchemy import select
from sqlalchemy.ext.asyncio import AsyncSession
from lazy_bird.core.logging import get_logger
from lazy_bird.models.webhook_subscription import WebhookSubscription
logger = get_logger(__name__)

# Webhook delivery configuration
MAX_RETRIES = 3  # Retry attempts after the initial delivery attempt
INITIAL_BACKOFF_SECONDS = 1  # First retry delay; doubles each attempt
MAX_BACKOFF_SECONDS = 60  # Cap on the per-retry wait
TIMEOUT_SECONDS = 30  # HTTP request timeout per delivery attempt
MAX_CONSECUTIVE_FAILURES = 10  # Auto-disable after this many failures
def generate_webhook_signature(payload: str, secret: str) -> str:
    """Generate HMAC-SHA256 signature for webhook payload.

    Args:
        payload: JSON payload as string
        secret: Webhook subscription secret

    Returns:
        str: Hexadecimal HMAC-SHA256 signature (64 hex characters)

    Example:
        >>> sig = generate_webhook_signature('{"event": "task.completed"}', "my-webhook-secret")
        >>> len(sig)
        64
    """
    mac = hmac.new(secret.encode("utf-8"), payload.encode("utf-8"), hashlib.sha256)
    return mac.hexdigest()
def verify_webhook_signature(payload: str, secret: str, signature: str) -> bool:
    """Verify HMAC-SHA256 signature for webhook payload.

    Recomputes the HMAC-SHA256 digest of *payload* under *secret* and compares
    it to *signature* in constant time.

    Args:
        payload: JSON payload as string
        secret: Webhook subscription secret
        signature: HMAC signature to verify

    Returns:
        bool: True if signature is valid, False otherwise
    """
    expected = hmac.new(
        secret.encode("utf-8"), payload.encode("utf-8"), hashlib.sha256
    ).hexdigest()
    # compare_digest avoids timing side-channels on the comparison.
    return hmac.compare_digest(signature, expected)
async def deliver_webhook(
    url: str,
    payload: Dict[str, Any],
    secret: str,
    timeout: int = TIMEOUT_SECONDS,
) -> Dict[str, Any]:
    """Deliver webhook event to endpoint with HMAC signature.

    Args:
        url: Webhook endpoint URL
        payload: Event payload dictionary
        secret: Webhook subscription secret
        timeout: Request timeout in seconds

    Returns:
        Dict with delivery result:
            - success: bool - Whether delivery succeeded (2xx status)
            - status_code: int - HTTP status code (0 on timeout/transport error)
            - response_time_ms: float - Response time in milliseconds
            - response_body: str - First 500 chars of response (HTTP reply only)
            - error: str - Error message (if failed)
    """
    # Serialize payload to JSON; default=str stringifies non-JSON-native
    # values (UUIDs, datetimes) instead of raising TypeError.
    payload_json = json.dumps(payload, default=str)
    # Sign the exact bytes that are sent so receivers can verify them.
    signature = generate_webhook_signature(payload_json, secret)
    # Prepare headers
    headers = {
        "Content-Type": "application/json",
        "X-Lazy-Bird-Signature": f"sha256={signature}",
        "X-Lazy-Bird-Event": payload.get("event", "unknown"),
        "User-Agent": "Lazy-Bird-Webhook/2.0",
    }
    # Measure response time
    start_time = datetime.now(timezone.utc)
    try:
        async with httpx.AsyncClient(timeout=timeout) as client:
            response = await client.post(url, content=payload_json, headers=headers)
        # Calculate response time
        end_time = datetime.now(timezone.utc)
        response_time_ms = (end_time - start_time).total_seconds() * 1000
        # Check if successful (2xx status code)
        success = 200 <= response.status_code < 300
        return {
            "success": success,
            "status_code": response.status_code,
            "response_time_ms": response_time_ms,
            "response_body": response.text[:500],  # First 500 chars
        }
    except httpx.TimeoutException as e:
        end_time = datetime.now(timezone.utc)
        response_time_ms = (end_time - start_time).total_seconds() * 1000
        logger.warning(f"Webhook delivery timeout: {url}", exc_info=True)
        return {
            "success": False,
            "status_code": 0,  # 0 signals "no HTTP response received"
            "response_time_ms": response_time_ms,
            "error": f"Request timeout after {timeout}s",
        }
    except Exception as e:
        end_time = datetime.now(timezone.utc)
        response_time_ms = (end_time - start_time).total_seconds() * 1000
        logger.error(f"Webhook delivery failed: {url}", exc_info=True)
        return {
            "success": False,
            "status_code": 0,
            "response_time_ms": response_time_ms,
            "error": str(e),
        }
async def deliver_webhook_with_retry(
    subscription: WebhookSubscription,
    payload: Dict[str, Any],
    db: AsyncSession,
    max_retries: int = MAX_RETRIES,
) -> Dict[str, Any]:
    """Deliver webhook with automatic retry and exponential backoff.

    On success the subscription's failure counter is reset and
    ``last_triggered_at`` updated.  After all attempts fail the failure
    counter is incremented and the subscription is auto-disabled once it
    reaches MAX_CONSECUTIVE_FAILURES.  Each state change is committed.

    Args:
        subscription: WebhookSubscription model instance
        payload: Event payload dictionary
        db: Database session for updating failure counts
        max_retries: Maximum number of retry attempts (in addition to the
            initial attempt)

    Returns:
        Dict with delivery result (same shape as deliver_webhook)
    """
    last_result = None
    backoff_seconds = INITIAL_BACKOFF_SECONDS
    for attempt in range(max_retries + 1):  # +1 for initial attempt
        # Deliver webhook
        result = await deliver_webhook(
            url=subscription.url,
            payload=payload,
            secret=subscription.secret,
        )
        last_result = result
        # If successful, reset failure count and update last_triggered_at
        if result["success"]:
            if subscription.failure_count > 0:
                subscription.failure_count = 0
                subscription.last_failure_at = None
            subscription.last_triggered_at = datetime.now(timezone.utc)
            await db.commit()
            logger.info(
                f"Webhook delivered successfully: {subscription.url}",
                extra={
                    "extra_fields": {
                        "subscription_id": str(subscription.id),
                        "attempt": attempt + 1,
                        "status_code": result["status_code"],
                        "response_time_ms": result["response_time_ms"],
                    }
                },
            )
            return result
        # If failed and retries remain, wait and retry
        if attempt < max_retries:
            wait_time = min(backoff_seconds, MAX_BACKOFF_SECONDS)
            logger.warning(
                f"Webhook delivery failed (attempt {attempt + 1}/{max_retries + 1}), "
                f"retrying in {wait_time}s: {subscription.url}",
                extra={
                    "extra_fields": {
                        "subscription_id": str(subscription.id),
                        "status_code": result.get("status_code"),
                        "error": result.get("error"),
                    }
                },
            )
            await asyncio.sleep(wait_time)
            backoff_seconds *= 2  # Exponential backoff
    # All retries failed - update failure count
    subscription.failure_count += 1
    subscription.last_failure_at = datetime.now(timezone.utc)
    # Auto-disable if too many consecutive failures
    if subscription.failure_count >= MAX_CONSECUTIVE_FAILURES:
        subscription.is_active = False
        logger.error(
            f"Webhook auto-disabled after {MAX_CONSECUTIVE_FAILURES} consecutive failures: {subscription.url}",
            extra={
                "extra_fields": {
                    "subscription_id": str(subscription.id),
                    "failure_count": subscription.failure_count,
                }
            },
        )
    await db.commit()
    logger.error(
        f"Webhook delivery failed after {max_retries + 1} attempts: {subscription.url}",
        extra={
            "extra_fields": {
                "subscription_id": str(subscription.id),
                "failure_count": subscription.failure_count,
                "last_error": last_result.get("error"),
            }
        },
    )
    return last_result
async def publish_event(
    event_type: str,
    data: Dict[str, Any],
    db: AsyncSession,
    project_id: Optional[UUID] = None,
) -> int:
    """Publish event to all matching webhook subscriptions.

    Args:
        event_type: Event type (e.g., "task.completed", "pr.created")
        data: Event data dictionary
        db: Database session for querying subscriptions
        project_id: Project ID (None for global events)

    Returns:
        int: Number of matching subscriptions the event was dispatched to.

    NOTE(review): the return value counts *matched* subscriptions, not
    successful deliveries — gather results (including exceptions) are
    discarded below.  Confirm callers don't interpret this as a success
    count.
    """
    # Build query to find matching subscriptions
    query = select(WebhookSubscription).where(
        WebhookSubscription.is_active == True,  # noqa: E712
        WebhookSubscription.events.contains([event_type]),
    )
    # Filter by project_id (NULL matches global subscriptions)
    if project_id is not None:
        # Match both global (NULL) and project-specific subscriptions
        query = query.where(
            (WebhookSubscription.project_id == project_id)
            | (WebhookSubscription.project_id.is_(None))
        )
    else:
        # Only match global subscriptions
        query = query.where(WebhookSubscription.project_id.is_(None))
    # Execute query
    result = await db.execute(query)
    subscriptions = result.scalars().all()
    if not subscriptions:
        logger.debug(
            f"No active webhook subscriptions found for event: {event_type}",
            extra={
                "extra_fields": {
                    "event_type": event_type,
                    "project_id": str(project_id) if project_id else None,
                }
            },
        )
        return 0
    # Build webhook payload
    payload = {
        "event": event_type,
        "timestamp": datetime.now(timezone.utc).isoformat(),
        "project_id": str(project_id) if project_id else None,
        "data": data,
    }
    # Deliver to all matching subscriptions in parallel; return_exceptions
    # keeps one failing delivery from cancelling the others.
    delivery_tasks = [
        deliver_webhook_with_retry(subscription, payload, db) for subscription in subscriptions
    ]
    await asyncio.gather(*delivery_tasks, return_exceptions=True)
    logger.info(
        f"Published event to {len(subscriptions)} webhook(s): {event_type}",
        extra={
            "extra_fields": {
                "event_type": event_type,
                "subscription_count": len(subscriptions),
                "project_id": str(project_id) if project_id else None,
            }
        },
    )
    return len(subscriptions)
async def send_test_webhook(
    subscription: WebhookSubscription,
    db: AsyncSession,
) -> Dict[str, Any]:
    """Send a test event to verify webhook configuration.

    Args:
        subscription: WebhookSubscription model instance
        db: Database session
            NOTE(review): `db` is not used in this function body — kept for
            interface symmetry with the other delivery helpers? Confirm.

    Returns:
        Dict with delivery result:
            - success: bool
            - status_code: int
            - response_time_ms: float
            - message: str
    """
    # Build test payload
    payload = {
        "event": "webhook.test",
        "timestamp": datetime.now(timezone.utc).isoformat(),
        "subscription_id": str(subscription.id),
        "data": {
            "message": "This is a test webhook delivery from Lazy-Bird",
            "url": subscription.url,
            "events": subscription.events,
        },
    }
    # Deliver without retry (test should be fast)
    result = await deliver_webhook(
        url=subscription.url,
        payload=payload,
        secret=subscription.secret,
    )
    # Add user-friendly message
    if result["success"]:
        result["message"] = "Test webhook delivered successfully"
    else:
        result["message"] = f"Test webhook delivery failed: {result.get('error', 'Unknown error')}"
    return result
| """Webhook delivery service with HMAC signatures and retry logic.
This module provides webhook event publishing with:
- HMAC-SHA256 signature generation for security
- Automatic retry with exponential backoff
- Failure tracking and auto-disable
- Async HTTP delivery
"""
import asyncio
import hashlib
import hmac
import json
from datetime import datetime, timezone
from typing import Any, Dict, List, Optional
from uuid import UUID
import httpx
from sqlalchemy import select
from sqlalchemy.ext.asyncio import AsyncSession
from lazy_bird.core.logging import get_logger
from lazy_bird.models.webhook_subscription import WebhookSubscription
logger = get_logger(__name__)
# Webhook delivery configuration
MAX_RETRIES = 3
INITIAL_BACKOFF_SECONDS = 1
MAX_BACKOFF_SECONDS = 60
TIMEOUT_SECONDS = 30
MAX_CONSECUTIVE_FAILURES = 10 # Auto-disable after this many failures
def generate_webhook_signature(payload: str, secret: str) -> str:
"""Generate HMAC-SHA256 signature for webhook payload.
Args:
payload: JSON payload as string
secret: Webhook subscription secret
Returns:
str: Hexadecimal HMAC-SHA256 signature
Example:
>>> payload = '{"event": "task.completed"}'
>>> secret = "my-webhook-secret"
>>> sig = generate_webhook_signature(payload, secret)
>>> len(sig)
64 # SHA-256 produces 64 hex characters
"""
signature = hmac.new(
secret.encode("utf-8"), payload.encode("utf-8"), hashlib.sha256
).hexdigest() | [
"# sqlalchemy/sqlalchemy:lib/sqlalchemy/sql/_selectable_constructors.py\nselect",
"# sqlalchemy/sqlalchemy:lib/sqlalchemy/ext/asyncio/session.py\nAsyncSession",
"# yusufkaraaslan/lazy-bird:lazy_bird/core/logging.py\nget_logger",
"# yusufkaraaslan/lazy-bird:lazy_bird/models/webhook_subscription.py\nWebhookSubs... | yusufkaraaslan/lazy-bird | lazy_bird/services/webhook_service.py |
"""Test runner service for executing and parsing tests across frameworks.
This module provides the TestRunner class for executing tests for different
frameworks (Godot, Python, Rust, etc.), parsing results, and handling failures.
"""
import json
import logging
import re
import subprocess
from datetime import datetime, timezone
from pathlib import Path
from typing import Dict, Any, Optional, List
logger = logging.getLogger(__name__)
class TestRunnerError(Exception):
    """Base exception for test runner errors."""
class TestExecutionError(TestRunnerError):
    """Exception raised when test execution fails unexpectedly."""

    def __init__(self, command: str, return_code: int, stderr: str, stdout: str = ""):
        # Preserve the raw execution details for callers that inspect them.
        self.command = command
        self.return_code = return_code
        self.stderr = stderr
        self.stdout = stdout
        message = (
            f"Test execution failed with code {return_code}:\n"
            f"Command: {command}\n"
            f"Error: {stderr}"
        )
        super().__init__(message)
class TestParser:
    """Base class for test output parsing.

    Subclasses implement :meth:`parse` for one framework and return a dict
    with at least: framework, stats, errors, error_count, success.
    """

    def parse(self, log_content: str) -> Dict[str, Any]:
        """Parse test log and return structured results.

        Args:
            log_content: Raw combined test output (stdout + stderr).

        Raises:
            NotImplementedError: Always; subclasses must override.
        """
        raise NotImplementedError
class GodotTestParser(TestParser):
"""Parser for Godot gdUnit4 test output."""
def parse(self, log_content: str) -> Dict[str, Any]:
"""Parse Godot test errors with file/line extraction."""
errors = []
stats = {"total": 0, "passed": 0, "failed": 0, "errors": 0}
# Extract test statistics
stats_pattern = (
r"Tests:\s*(\d+)\s*\|\s*Passed:\s*(\d+)\s*\|\s*Failed:\s*(\d+)\s*\|\s*Errors:\s*(\d+)"
)
stats_match = re.search(stats_pattern, log_content)
if stats_match:
stats = {
"total": int(stats_match.group(1)),
"passed": int(stats_match.group(2)),
"failed": int(stats_match.group(3)),
"errors": int(stats_match.group(4)),
}
# Extract failed tests with details
failed_pattern = r"FAILED:\s+(\S+)\s+(.*?)\s+at\s+(res://[^:]+):(\d+)"
for match in re.finditer(failed_pattern, log_content, re.DOTALL):
test_name = match.group(1)
error_msg = match.group(2).strip()
file_path = match.group(3)
line_number = int(match.group(4))
errors.append(
{
"test_name": test_name,
"file": file_path,
"line": line_number,
"error": error_msg,
"type": "test_failure",
}
)
# Capture assertion failures
assert_pattern = r"Assertion\s+failed:?\s+(.*?)\s+at\s+(res://[^:]+):(\d+)"
for match in re.finditer(assert_pattern, log_content):
error_msg = match.group(1).strip()
file_path = match.group(2)
line_number = int(match.group(3))
errors.append(
{
"test_name": "assertion",
"file": file_path,
"line": line_number,
"error": error_msg,
"type": "assertion_failure",
}
)
return {
"framework": "godot",
"stats": stats,
"errors": errors,
"error_count": len(errors),
"success": stats["failed"] == 0 and stats["errors"] == 0,
}
class PythonTestParser(TestParser):
    """Parser for Python pytest output.

    Extracts pass/fail counts from the pytest summary line and per-test
    failure details from ``FAILED path::test - message`` lines.
    """

    def parse(self, log_content: str) -> Dict[str, Any]:
        """Parse pytest errors with stack trace extraction.

        Args:
            log_content: Combined stdout/stderr from a pytest run.

        Returns:
            Dict with framework, stats, errors, error_count, success.
        """
        errors: List[Dict[str, Any]] = []
        stats = {"total": 0, "passed": 0, "failed": 0, "errors": 0}
        # Parse pass/fail counts independently so summaries like "5 passed"
        # (all tests green) or "2 failed" (nothing passed) are still counted.
        # The previous combined pattern required "N failed ... M passed" in
        # one match and reported zero counts for all-pass runs.
        failed_match = re.search(r"(\d+)\s+failed", log_content)
        if failed_match:
            stats["failed"] = int(failed_match.group(1))
        passed_match = re.search(r"(\d+)\s+passed", log_content)
        if passed_match:
            stats["passed"] = int(passed_match.group(1))
        stats["total"] = stats["failed"] + stats["passed"]
        # Extract failed tests: "FAILED path/to/test.py::test_name - message"
        failed_pattern = r"FAILED\s+([^:]+)::(\S+)\s+-\s+(.*?)$"
        for match in re.finditer(failed_pattern, log_content, re.MULTILINE):
            file_path = match.group(1)
            test_name = match.group(2)
            error_msg = match.group(3).strip()
            # Best-effort line number: first "<file>:<line>:" occurrence in the
            # log (may pick the wrong line if the file appears several times).
            line_number = None
            stack_pattern = rf"{re.escape(file_path)}:(\d+):"
            stack_match = re.search(stack_pattern, log_content)
            if stack_match:
                line_number = int(stack_match.group(1))
            errors.append(
                {
                    "test_name": test_name,
                    "file": file_path,
                    "line": line_number,
                    "error": error_msg,
                    "type": "test_failure",
                }
            )
        return {
            "framework": "python",
            "stats": stats,
            "errors": errors,
            "error_count": len(errors),
            "success": stats["failed"] == 0,
        }
class RustTestParser(TestParser):
    """Parser for Rust cargo test output."""

    def parse(self, log_content: str) -> Dict[str, Any]:
        """Parse Rust test errors."""
        stats = {"total": 0, "passed": 0, "failed": 0, "errors": 0}
        # Summary line: "test result: ... N passed; M failed"
        summary = re.search(r"test\s+result:.*?(\d+)\s+passed;\s+(\d+)\s+failed", log_content)
        if summary:
            stats["passed"] = int(summary.group(1))
            stats["failed"] = int(summary.group(2))
            stats["total"] = stats["passed"] + stats["failed"]
        failures: List[Dict[str, Any]] = []
        # Each failing test dumps a "---- <name> stdout ----" section.
        for section in re.finditer(
            r"----\s+([\w:]+)\s+stdout\s+----\s+(.*?)(?=\n\n|$)", log_content, re.DOTALL
        ):
            name = section.group(1)
            body = section.group(2)
            # Pull message/file/line out of the panic text, if present.
            panic = re.search(r"panicked at '([^']+)',\s+([^:]+):(\d+):\d+", body)
            if panic is None:
                continue
            failures.append(
                {
                    "test_name": name,
                    "file": panic.group(2),
                    "line": int(panic.group(3)),
                    "error": panic.group(1),
                    "type": "panic",
                }
            )
        return {
            "framework": "rust",
            "stats": stats,
            "errors": failures,
            "error_count": len(failures),
            "success": stats["failed"] == 0,
        }
class GenericTestParser(TestParser):
    """Generic parser for unknown frameworks."""

    def parse(self, log_content: str) -> Dict[str, Any]:
        """Basic parsing - just check for common failure patterns."""
        # Heuristic only: report failure when any common failure keyword appears.
        failure_markers = (
            re.search(r"\bFAILED\b", log_content, re.IGNORECASE),
            re.search(r"\bERROR\b", log_content, re.IGNORECASE),
            re.search(r"\bFAIL\b", log_content),
        )
        has_failures = any(failure_markers)
        return {
            "framework": "generic",
            "stats": {"total": 0, "passed": 0, "failed": 0, "errors": 0},
            "errors": [],
            "error_count": 0,
            "success": not has_failures,
            "raw_output": log_content,
        }
class TestRunner:
    """Service for running tests across different frameworks.

    Executes a shell test command, writes the raw output to a log file, and
    delegates output parsing to a framework-specific TestParser.
    """

    # Map framework types to parsers.  Parser instances hold no state, so
    # sharing them at class level across TestRunner instances is safe.
    PARSERS = {
        "godot": GodotTestParser(),
        "python": PythonTestParser(),
        "pytest": PythonTestParser(),
        "django": PythonTestParser(),
        "flask": PythonTestParser(),
        "fastapi": PythonTestParser(),
        "rust": RustTestParser(),
        "cargo": RustTestParser(),
    }

    def __init__(self, timeout: int = 600):
        """Initialize TestRunner.

        Args:
            timeout: Test execution timeout in seconds (default: 600)
        """
        self.timeout = timeout

    def run_tests(
        self,
        test_command: str,
        working_directory: Path,
        framework: str,
        project_id: Optional[str] = None,
        task_id: Optional[int] = None,
        log_file: Optional[Path] = None,
    ) -> Dict[str, Any]:
        """Execute tests and parse results.

        Args:
            test_command: Command to execute tests
            working_directory: Directory to run tests in
            framework: Framework type (godot, python, rust, etc.)
            project_id: Project identifier for logging
            task_id: Task ID for logging
            log_file: Path to save test output

        Returns:
            dict: Test results
                - success: bool
                - framework: str
                - stats: dict (total, passed, failed, errors)
                - errors: list of error details
                - output: str (test command output)
                - execution_time: float (seconds)
                - log_file: str (path to log file)

        Raises:
            TestRunnerError: If test execution fails unexpectedly
                (a timeout is reported as a failed result, not raised)
        """
        start_time = datetime.now(timezone.utc)
        # Determine log file path
        if log_file is None:
            log_dir = Path("/tmp/lazy-bird-logs") / "tests"
            log_dir.mkdir(parents=True, exist_ok=True)
            # Naive local time is fine here: used only to make filenames unique.
            timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
            prefix = f"{project_id}-{task_id}" if project_id and task_id else "test"
            log_file = log_dir / f"{prefix}_{timestamp}.log"
        logger.info(
            f"Running tests",
            extra={
                "extra_fields": {
                    "project_id": project_id,
                    "task_id": task_id,
                    "framework": framework,
                    "command": test_command,
                    "working_directory": str(working_directory),
                    "log_file": str(log_file),
                }
            },
        )
        try:
            # Execute test command
            result = subprocess.run(
                test_command,
                cwd=working_directory,
                shell=True,  # nosec B602 - test commands come from project config, not user input
                capture_output=True,
                text=True,
                timeout=self.timeout,
            )
            # Write output to log file
            with open(log_file, "w") as f:
                f.write("=" * 80 + "\n")
                f.write("Test Execution Log\n")
                f.write("=" * 80 + "\n\n")
                f.write(f"Command: {test_command}\n")
                f.write(f"Working Directory: {working_directory}\n")
                f.write(f"Framework: {framework}\n")
                f.write(f"Start Time: {start_time.isoformat()}\n")
                f.write(f"Return Code: {result.returncode}\n\n")
                f.write("=" * 80 + "\n")
                f.write("STDOUT:\n")
                f.write("=" * 80 + "\n")
                f.write(result.stdout + "\n\n")
                f.write("=" * 80 + "\n")
                f.write("STDERR:\n")
                f.write("=" * 80 + "\n")
                f.write(result.stderr + "\n")
            # Parse test output; stdout and stderr are combined because some
            # frameworks print results to either stream.
            parser = self._get_parser(framework)
            parsed_result = parser.parse(result.stdout + "\n" + result.stderr)
            # Calculate execution time
            end_time = datetime.now(timezone.utc)
            execution_time = (end_time - start_time).total_seconds()
            # Build final result
            test_result = {
                **parsed_result,
                "command": test_command,
                "return_code": result.returncode,
                "output": result.stdout,
                "stderr": result.stderr,
                "execution_time": execution_time,
                "log_file": str(log_file),
            }
            # Log result at info on success, error on failure.
            log_level = "info" if test_result["success"] else "error"
            getattr(logger, log_level)(
                f"Tests {'passed' if test_result['success'] else 'failed'}",
                extra={
                    "extra_fields": {
                        "project_id": project_id,
                        "task_id": task_id,
                        "framework": framework,
                        "success": test_result["success"],
                        "stats": test_result["stats"],
                        "error_count": test_result["error_count"],
                        "execution_time": execution_time,
                    }
                },
            )
            return test_result
        except subprocess.TimeoutExpired as e:
            # Timeout is reported as a failed result rather than raised.
            # NOTE(review): in this branch no log file is written, but its
            # path is still returned — confirm consumers tolerate a missing file.
            error_msg = f"Test execution timed out after {self.timeout}s"
            logger.error(
                error_msg,
                extra={
                    "extra_fields": {
                        "project_id": project_id,
                        "task_id": task_id,
                        "framework": framework,
                        "timeout": self.timeout,
                    }
                },
            )
            return {
                "success": False,
                "framework": framework,
                "stats": {"total": 0, "passed": 0, "failed": 0, "errors": 1},
                "errors": [{"error": error_msg, "type": "timeout"}],
                "error_count": 1,
                "output": "",
                "stderr": error_msg,
                "execution_time": self.timeout,
                "log_file": str(log_file),
            }
        except Exception as e:
            error_msg = f"Unexpected test execution error: {str(e)}"
            logger.error(
                error_msg,
                extra={
                    "extra_fields": {
                        "project_id": project_id,
                        "task_id": task_id,
                        "framework": framework,
                        "error": str(e),
                    }
                },
                exc_info=True,
            )
            raise TestRunnerError(error_msg) from e

    def _get_parser(self, framework: str) -> TestParser:
        """Get appropriate parser for framework.

        Args:
            framework: Framework type (case-insensitive)

        Returns:
            TestParser: Parser instance (GenericTestParser for unknown types)
        """
        # Normalize framework name
        framework_lower = framework.lower()
        # Return specific parser or generic fallback
        return self.PARSERS.get(framework_lower, GenericTestParser())

    def format_error_summary(self, test_result: Dict[str, Any]) -> str:
        """Format test errors into a human-readable summary.

        Args:
            test_result: Test result from run_tests()

        Returns:
            str: Formatted error summary for Claude retry context
        """
        if test_result["success"]:
            return "All tests passed!"
        summary_lines = []
        summary_lines.append(f"Test Summary ({test_result['framework']}):")
        summary_lines.append(f"  Total: {test_result['stats']['total']}")
        summary_lines.append(f"  Passed: {test_result['stats']['passed']}")
        summary_lines.append(f"  Failed: {test_result['stats']['failed']}")
        if test_result["errors"]:
            summary_lines.append("\nTest Failures:")
            for i, error in enumerate(test_result["errors"][:5], 1):  # Limit to 5
                summary_lines.append(f"\n{i}. {error.get('test_name', 'Unknown test')}")
                if error.get("file"):
                    summary_lines.append(f"   File: {error['file']}")
                if error.get("line"):
                    summary_lines.append(f"   Line: {error['line']}")
                summary_lines.append(f"   Error: {error.get('error', 'Unknown error')}")
            if len(test_result["errors"]) > 5:
                summary_lines.append(f"\n... and {len(test_result['errors']) - 5} more failures")
        return "\n".join(summary_lines)
# Public API of this module.
__all__ = [
    "TestRunner",
    "TestRunnerError",
    "TestExecutionError",
    "GodotTestParser",
    "PythonTestParser",
    "RustTestParser",
    "GenericTestParser",
]
| """Test runner service for executing and parsing tests across frameworks.
This module provides the TestRunner class for executing tests for different
frameworks (Godot, Python, Rust, etc.), parsing results, and handling failures.
"""
import json
import logging
import re
import subprocess
from datetime import datetime, timezone
from pathlib import Path
from typing import Dict, Any, Optional, List
logger = logging.getLogger(__name__)
class TestRunnerError(Exception):
"""Base exception for test runner errors."""
pass
class TestExecutionError(TestRunnerError):
"""Exception raised when test execution fails unexpectedly."""
def __init__(self, command: str, return_code: int, stderr: str, stdout: str = ""):
self.command = command
self.return_code = return_code
self.stderr = stderr
self.stdout = stdout
super().__init__(
f"Test execution failed with code {return_code}:\n"
f"Command: {command}\n"
f"Error: {stderr}"
)
class TestParser:
"""Base class for test output parsing."""
def parse(self, log_content: str) -> Dict[str, Any]:
"""Parse test log and return structured results."""
raise NotImplementedError
class GodotTestParser(TestParser):
"""Parser for Godot gdUnit4 test output."""
def parse(self, log_content: str) -> Dict[str, Any]:
"""Parse Godot test errors with file/line extraction."""
errors = []
| [] | yusufkaraaslan/lazy-bird | lazy_bird/services/test_runner.py |
"""Seed framework presets from config/framework-presets.yml into the database.
This module reads the YAML preset definitions and upserts them into the
framework_presets table on application startup, ensuring the database
always reflects the latest preset configurations.
"""
import os
from pathlib import Path
from typing import Any, Dict, Tuple
import yaml
from sqlalchemy import select
from sqlalchemy.ext.asyncio import AsyncSession
from lazy_bird.core.logging import get_logger
from lazy_bird.models.framework_preset import FrameworkPreset
logger = get_logger(__name__)
# Mapping from YAML preset key to (framework_type, language)
PRESET_METADATA: Dict[str, Tuple[str, str]] = {
# Game Engines
"godot": ("game_engine", "gdscript"),
"unity": ("game_engine", "csharp"),
"unreal": ("game_engine", "cpp"),
"bevy": ("game_engine", "rust"),
# Backend Frameworks
"django": ("backend", "python"),
"flask": ("backend", "python"),
"fastapi": ("backend", "python"),
"express": ("backend", "javascript"),
"rails": ("backend", "ruby"),
# Frontend Frameworks
"react": ("frontend", "javascript"),
"vue": ("frontend", "javascript"),
"angular": ("frontend", "typescript"),
"svelte": ("frontend", "javascript"),
# Languages
"python": ("language", "python"),
"rust": ("language", "rust"),
"go": ("language", "go"),
"nodejs": ("language", "javascript"),
"cpp": ("language", "cpp"),
"java": ("language", "java"),
# Custom
"custom": ("language", "other"),
}
def _get_presets_yaml_path() -> Path:
"""Resolve the path to framework-presets.yml.
Looks relative to the project root (two levels up from this file's
package directory), falling back to a LAZY_BIRD_PRESETS_PATH env var.
Returns:
Path to the framework-presets.yml file.
"""
env_path = os.environ.get("LAZY_BIRD_PRESETS_PATH")
if env_path:
return Path(env_path)
# Default: <project_root>/config/framework-presets.yml
project_root = Path(__file__).resolve().parent.parent.parent
return project_root / "config" / "framework-presets.yml"
def _load_presets_from_yaml(yaml_path: Path) -> Dict[str, Dict[str, Any]]:
    """Load and parse the framework presets YAML file.

    Args:
        yaml_path: Path to the YAML file.

    Returns:
        Dictionary of preset_key -> preset_data. Empty dict when the file
        is empty or does not contain a mapping at the top level.

    Raises:
        FileNotFoundError: If the YAML file does not exist.
        yaml.YAMLError: If the YAML is malformed.
    """
    with open(yaml_path, "r", encoding="utf-8") as f:
        data = yaml.safe_load(f)
    # safe_load returns None for an empty file (and may return a non-dict
    # for degenerate YAML); guard so we don't raise AttributeError on .get().
    if not isinstance(data, dict):
        return {}
    return data.get("presets", {})
def _yaml_preset_to_model_kwargs(key: str, preset_data: Dict[str, Any]) -> Dict[str, Any]:
    """Translate one YAML preset entry into FrameworkPreset kwargs.

    Args:
        key: The preset identifier key from YAML (e.g. 'godot', 'django').
        preset_data: The preset data dictionary from YAML.

    Returns:
        Dictionary of keyword arguments for FrameworkPreset.
    """
    # Unknown keys default to a generic language preset.
    framework_type, language = PRESET_METADATA.get(key, ("language", "other"))
    config_blob = {
        "file_extensions": preset_data.get("file_extensions", []),
        "ignore_patterns": preset_data.get("ignore_patterns", []),
        "docs_url": preset_data.get("docs_url"),
    }
    kwargs: Dict[str, Any] = {
        "name": key,
        "display_name": preset_data.get("name", key.title()),
        "description": preset_data.get("description"),
        "framework_type": framework_type,
        "language": language,
        "test_command": preset_data.get("test_command", "make test"),
        "build_command": preset_data.get("build_command"),
        "lint_command": preset_data.get("lint_command"),
        "format_command": preset_data.get("format_command"),
        "config_files": config_blob,
        "is_builtin": True,
    }
    return kwargs
async def seed_framework_presets(db: AsyncSession) -> Tuple[int, int]:
    """Upsert the YAML-defined framework presets into the database.

    Every preset found in the YAML file is either inserted as a new
    FrameworkPreset row (with is_builtin=True) or, when a row with the
    same name already exists, refreshed in place so the table mirrors
    the YAML definitions.

    Args:
        db: An async SQLAlchemy session.

    Returns:
        Tuple of (created_count, updated_count).
    """
    yaml_path = _get_presets_yaml_path()
    if not yaml_path.exists():
        logger.warning(f"Framework presets YAML not found at {yaml_path}, skipping seed")
        return (0, 0)

    try:
        presets_data = _load_presets_from_yaml(yaml_path)
    except Exception as e:
        logger.error(f"Failed to load framework presets YAML: {e}")
        return (0, 0)

    if not presets_data:
        logger.warning("No presets found in YAML file")
        return (0, 0)

    created = 0
    updated = 0
    for preset_key, raw_preset in presets_data.items():
        model_kwargs = _yaml_preset_to_model_kwargs(preset_key, raw_preset)
        lookup = await db.execute(
            select(FrameworkPreset).where(FrameworkPreset.name == preset_key)
        )
        row = lookup.scalar_one_or_none()
        if row is None:
            db.add(FrameworkPreset(**model_kwargs))
            created += 1
            logger.debug(f"Created preset: {preset_key}")
        else:
            # Refresh every field except the identifying name.
            for attr, new_value in model_kwargs.items():
                if attr != "name":
                    setattr(row, attr, new_value)
            updated += 1
            logger.debug(f"Updated preset: {preset_key}")

    await db.commit()
    logger.info(
        f"Framework presets seeded: {created} created, {updated} updated "
        f"(total: {len(presets_data)} presets in YAML)"
    )
    return (created, updated)
| """Seed framework presets from config/framework-presets.yml into the database.
This module reads the YAML preset definitions and upserts them into the
framework_presets table on application startup, ensuring the database
always reflects the latest preset configurations.
"""
import os
from pathlib import Path
from typing import Any, Dict, Tuple
import yaml
from sqlalchemy import select
from sqlalchemy.ext.asyncio import AsyncSession
from lazy_bird.core.logging import get_logger
from lazy_bird.models.framework_preset import FrameworkPreset
logger = get_logger(__name__)
# Mapping from YAML preset key to (framework_type, language)
PRESET_METADATA: Dict[str, Tuple[str, str]] = {
# Game Engines
"godot": ("game_engine", "gdscript"),
"unity": ("game_engine", "csharp"),
"unreal": ("game_engine", "cpp"),
"bevy": ("game_engine", "rust"),
# Backend Frameworks
"django": ("backend", "python"),
"flask": ("backend", "python"),
"fastapi": ("backend", "python"),
"express": ("backend", "javascript"),
"rails": ("backend", "ruby"),
# Frontend Frameworks
"react": ("frontend", "javascript"),
"vue": ("frontend", "javascript"),
"angular": ("frontend", "typescript"),
"svelte": ("frontend", "javascript"),
# Languages
"python": ("language", "python"),
"rust": ("language", "rust"),
"go": ("language", "go"),
"nodejs": ("language", "javascript"),
"cpp": ("language", "cpp"),
"java": ("language", "java"),
# Custom | [
"# sqlalchemy/sqlalchemy:lib/sqlalchemy/sql/_selectable_constructors.py\nselect",
"# sqlalchemy/sqlalchemy:lib/sqlalchemy/ext/asyncio/session.py\nAsyncSession",
"# yusufkaraaslan/lazy-bird:lazy_bird/core/logging.py\nget_logger",
"# yusufkaraaslan/lazy-bird:lazy_bird/models/framework_preset.py\nFrameworkPreset... | yusufkaraaslan/lazy-bird | lazy_bird/services/preset_seeder.py |
"""Pull Request/Merge Request service for GitHub and GitLab.
This module provides the PRService class for creating pull requests (GitHub)
and merge requests (GitLab) with templates, issue linking, and automation.
"""
import logging
import re
import subprocess
from datetime import datetime, timezone
from pathlib import Path
from typing import Any, Dict, List, Optional
logger = logging.getLogger(__name__)
class PRServiceError(Exception):
    """Common base class for errors raised by the PR/MR service."""
class PRCreationError(PRServiceError):
    """Raised when a `gh`/`glab` create command fails.

    Carries the platform name, the exact CLI command that was run, and
    the captured stderr so callers can diagnose the failure.
    """

    def __init__(self, platform: str, command: str, stderr: str):
        # Preserve the failure details as attributes for programmatic access.
        self.stderr = stderr
        self.command = command
        self.platform = platform
        details = (
            f"{platform} PR/MR creation failed:\n"
            f"Command: {command}\n"
            f"Error: {stderr}"
        )
        super().__init__(details)
class PRService:
    """Service for creating pull requests and merge requests.

    Supports both GitHub (via gh CLI) and GitLab (via glab CLI).
    """

    def __init__(self, working_directory: Optional[Path] = None):
        """Initialize PRService.

        Args:
            working_directory: Directory to execute commands in (default: current)
        """
        self.working_directory = working_directory or Path.cwd()

    def create_pull_request(
        self,
        title: str,
        body: str,
        base_branch: str,
        head_branch: str,
        repository: Optional[str] = None,
        labels: Optional[List[str]] = None,
        draft: bool = False,
        project_id: Optional[str] = None,
        task_id: Optional[int] = None,
        issue_number: Optional[int] = None,
    ) -> Dict[str, Any]:
        """Create a GitHub pull request.

        Args:
            title: PR title
            body: PR description
            base_branch: Target branch (e.g., "main")
            head_branch: Source branch with changes
            repository: Repository (e.g., "user/repo", optional if in repo)
            labels: List of labels to add
            draft: Create as draft PR
            project_id: Project identifier for logging
            task_id: Task ID for logging
            issue_number: Original issue number to link and comment

        Returns:
            dict: PR creation result
                - success: bool
                - url: str (PR URL)
                - number: int (PR number)
                - platform: str ("github")

        Raises:
            PRCreationError: If PR creation fails
        """
        logger.info(
            "Creating GitHub pull request",
            extra={
                "extra_fields": {
                    "project_id": project_id,
                    "task_id": task_id,
                    "title": title,
                    "base": base_branch,
                    "head": head_branch,
                    "draft": draft,
                }
            },
        )
        # Build gh pr create command
        command = ["gh", "pr", "create"]
        if repository:
            command.extend(["--repo", repository])
        command.extend(
            [
                "--title",
                title,
                "--body",
                body,
                "--base",
                base_branch,
                "--head",
                head_branch,
            ]
        )
        # Add labels (gh takes one --label flag per label)
        if labels:
            for label in labels:
                command.extend(["--label", label])
        # Draft PR
        if draft:
            command.append("--draft")
        try:
            # Execute gh pr create
            result = subprocess.run(
                command,
                cwd=self.working_directory,
                capture_output=True,
                text=True,
                timeout=60,
            )
            if result.returncode != 0:
                raise PRCreationError("GitHub", " ".join(command), result.stderr)
            # gh prints the PR URL on stdout
            pr_url = result.stdout.strip()
            # Get PR number from URL
            pr_number = self._extract_pr_number_from_url(pr_url)
            logger.info(
                "Pull request created successfully",
                extra={
                    "extra_fields": {
                        "project_id": project_id,
                        "task_id": task_id,
                        "pr_url": pr_url,
                        "pr_number": pr_number,
                    }
                },
            )
            # Comment on original issue if provided
            if issue_number:
                self._comment_on_github_issue(
                    issue_number=issue_number,
                    pr_url=pr_url,
                    project_id=project_id,
                    task_id=task_id,
                    repository=repository,
                )
            return {
                "success": True,
                "url": pr_url,
                "number": pr_number,
                "platform": "github",
            }
        except subprocess.TimeoutExpired as e:
            error_msg = "GitHub PR creation timed out after 60s"
            logger.error(error_msg, extra={"extra_fields": {"project_id": project_id}})
            raise PRServiceError(error_msg) from e
        except Exception as e:
            logger.error(
                f"GitHub PR creation failed: {str(e)}",
                extra={"extra_fields": {"project_id": project_id, "error": str(e)}},
                exc_info=True,
            )
            raise

    def create_merge_request(
        self,
        title: str,
        body: str,
        base_branch: str,
        head_branch: str,
        repository: Optional[str] = None,
        labels: Optional[List[str]] = None,
        draft: bool = False,
        project_id: Optional[str] = None,
        task_id: Optional[int] = None,
        issue_number: Optional[int] = None,
    ) -> Dict[str, Any]:
        """Create a GitLab merge request.

        Args:
            title: MR title
            body: MR description
            base_branch: Target branch (e.g., "main")
            head_branch: Source branch with changes
            repository: Repository (e.g., "group/project", optional if in repo)
            labels: List of labels to add
            draft: Create as draft MR
            project_id: Project identifier for logging
            task_id: Task ID for logging
            issue_number: Original issue number to link and comment

        Returns:
            dict: MR creation result
                - success: bool
                - url: str (MR URL)
                - number: int (MR IID)
                - platform: str ("gitlab")

        Raises:
            PRCreationError: If MR creation fails
        """
        logger.info(
            "Creating GitLab merge request",
            extra={
                "extra_fields": {
                    "project_id": project_id,
                    "task_id": task_id,
                    "title": title,
                    "base": base_branch,
                    "head": head_branch,
                    "draft": draft,
                }
            },
        )
        # Build glab mr create command
        command = ["glab", "mr", "create"]
        if repository:
            command.extend(["--repo", repository])
        # GitLab uses a "Draft: " title prefix for draft MRs
        mr_title = f"Draft: {title}" if draft else title
        command.extend(
            [
                "--title",
                mr_title,
                "--description",
                body,
                "--target-branch",
                base_branch,
                "--source-branch",
                head_branch,
            ]
        )
        # Add labels (glab takes a single comma-separated --label value)
        if labels:
            command.extend(["--label", ",".join(labels)])
        # Set to draft
        if draft:
            command.append("--draft")
        try:
            # Execute glab mr create
            result = subprocess.run(
                command,
                cwd=self.working_directory,
                capture_output=True,
                text=True,
                timeout=60,
            )
            if result.returncode != 0:
                raise PRCreationError("GitLab", " ".join(command), result.stderr)
            # glab prints the MR URL on stdout
            mr_url = result.stdout.strip()
            # Get MR IID from URL
            mr_iid = self._extract_mr_iid_from_url(mr_url)
            logger.info(
                "Merge request created successfully",
                extra={
                    "extra_fields": {
                        "project_id": project_id,
                        "task_id": task_id,
                        "mr_url": mr_url,
                        "mr_iid": mr_iid,
                    }
                },
            )
            # Comment on original issue if provided
            if issue_number:
                self._comment_on_gitlab_issue(
                    issue_number=issue_number,
                    mr_url=mr_url,
                    project_id=project_id,
                    task_id=task_id,
                    repository=repository,
                )
            return {
                "success": True,
                "url": mr_url,
                "number": mr_iid,
                "platform": "gitlab",
            }
        except subprocess.TimeoutExpired as e:
            error_msg = "GitLab MR creation timed out after 60s"
            logger.error(error_msg, extra={"extra_fields": {"project_id": project_id}})
            raise PRServiceError(error_msg) from e
        except Exception as e:
            logger.error(
                f"GitLab MR creation failed: {str(e)}",
                extra={"extra_fields": {"project_id": project_id, "error": str(e)}},
                exc_info=True,
            )
            raise

    def build_pr_body(
        self,
        task_description: str,
        implementation_summary: Optional[str] = None,
        test_results: Optional[Dict[str, Any]] = None,
        diff_stats: Optional[Dict[str, int]] = None,
    ) -> str:
        """Build PR/MR body from task details.

        Args:
            task_description: Original task description
            implementation_summary: Claude's implementation summary (optional)
            test_results: Test execution results (optional)
            diff_stats: Git diff statistics (optional)

        Returns:
            str: Formatted PR/MR body
        """
        sections = []
        # Task description
        sections.append("## Task Description\n")
        sections.append(task_description)
        # Implementation summary
        if implementation_summary:
            sections.append("\n## Implementation Summary\n")
            sections.append(implementation_summary)
        # Test results
        if test_results:
            sections.append("\n## Test Results\n")
            if test_results.get("success"):
                sections.append("✅ All tests passed!")
                sections.append(f"\n- **Total:** {test_results['stats']['total']}")
                sections.append(f"- **Passed:** {test_results['stats']['passed']}")
                sections.append(f"- **Execution Time:** {test_results['execution_time']:.2f}s")
            else:
                sections.append("⚠️ Some tests failed")
                sections.append(f"\n- **Total:** {test_results['stats']['total']}")
                sections.append(f"- **Passed:** {test_results['stats']['passed']}")
                sections.append(f"- **Failed:** {test_results['stats']['failed']}")
        # Diff stats
        if diff_stats:
            sections.append("\n## Changes\n")
            sections.append(f"- **Files changed:** {diff_stats.get('files_changed', 0)}")
            sections.append(f"- **Insertions:** +{diff_stats.get('insertions', 0)}")
            sections.append(f"- **Deletions:** -{diff_stats.get('deletions', 0)}")
        # Footer
        sections.append("\n---\n")
        sections.append("🤖 **This PR was automatically generated by Lazy-Bird**\n")
        # BUGFIX: use an aware UTC timestamp so the rendered "UTC" suffix is
        # accurate — datetime.now() without a tz argument returns LOCAL time.
        sections.append(f"Generated: {datetime.now(timezone.utc).strftime('%Y-%m-%d %H:%M:%S UTC')}\n")
        sections.append("\nFor issues or questions: https://github.com/yusufkaraaslan/lazy-bird")
        return "\n".join(sections)

    def _extract_pr_number_from_url(self, url: str) -> int:
        """Extract PR number from GitHub URL.

        Args:
            url: GitHub PR URL

        Returns:
            int: PR number (0 when the URL does not match)
        """
        match = re.search(r"/pull/(\d+)", url)
        if match:
            return int(match.group(1))
        return 0

    def _extract_mr_iid_from_url(self, url: str) -> int:
        """Extract MR IID from GitLab URL.

        Args:
            url: GitLab MR URL

        Returns:
            int: MR IID (internal ID; 0 when the URL does not match)
        """
        match = re.search(r"/-/merge_requests/(\d+)", url)
        if match:
            return int(match.group(1))
        return 0

    def _comment_on_github_issue(
        self,
        issue_number: int,
        pr_url: str,
        project_id: Optional[str] = None,
        task_id: Optional[int] = None,
        repository: Optional[str] = None,
    ) -> None:
        """Add comment to GitHub issue linking to PR.

        Best-effort: failures are logged but never propagate, so a comment
        problem cannot fail an otherwise-successful PR creation.

        Args:
            issue_number: Issue number
            pr_url: Pull request URL
            project_id: Project ID for logging
            task_id: Task ID for logging
            repository: Repository (optional)
        """
        comment = f"""✅ **Implementation Complete**
**Pull Request:** {pr_url}
Please review the changes and merge if satisfied.
---
🤖 Automated by Lazy-Bird"""
        command = ["gh", "issue", "comment", str(issue_number), "--body", comment]
        if repository:
            command.extend(["--repo", repository])
        try:
            subprocess.run(
                command,
                cwd=self.working_directory,
                capture_output=True,
                text=True,
                timeout=30,
                check=True,
            )
            logger.info(
                f"Commented on GitHub issue #{issue_number}",
                extra={
                    "extra_fields": {
                        "project_id": project_id,
                        "task_id": task_id,
                        "issue_number": issue_number,
                    }
                },
            )
        except Exception as e:
            # Log error but don't fail the PR creation
            logger.warning(
                f"Failed to comment on issue #{issue_number}: {str(e)}",
                extra={"extra_fields": {"issue_number": issue_number, "error": str(e)}},
            )

    def _comment_on_gitlab_issue(
        self,
        issue_number: int,
        mr_url: str,
        project_id: Optional[str] = None,
        task_id: Optional[int] = None,
        repository: Optional[str] = None,
    ) -> None:
        """Add comment to GitLab issue linking to MR.

        Best-effort: failures are logged but never propagate, so a comment
        problem cannot fail an otherwise-successful MR creation.

        Args:
            issue_number: Issue number (IID)
            mr_url: Merge request URL
            project_id: Project ID for logging
            task_id: Task ID for logging
            repository: Repository (optional)
        """
        comment = f"""✅ **Implementation Complete**
**Merge Request:** {mr_url}
Please review the changes and merge if satisfied.
---
🤖 Automated by Lazy-Bird"""
        command = ["glab", "issue", "note", str(issue_number), "--message", comment]
        if repository:
            command.extend(["--repo", repository])
        try:
            subprocess.run(
                command,
                cwd=self.working_directory,
                capture_output=True,
                text=True,
                timeout=30,
                check=True,
            )
            logger.info(
                f"Commented on GitLab issue #{issue_number}",
                extra={
                    "extra_fields": {
                        "project_id": project_id,
                        "task_id": task_id,
                        "issue_number": issue_number,
                    }
                },
            )
        except Exception as e:
            # Log error but don't fail the MR creation
            logger.warning(
                f"Failed to comment on issue #{issue_number}: {str(e)}",
                extra={"extra_fields": {"issue_number": issue_number, "error": str(e)}},
            )
# Public names exported by this module.
__all__ = [
    "PRService",
    "PRServiceError",
    "PRCreationError",
]
| """Pull Request/Merge Request service for GitHub and GitLab.
This module provides the PRService class for creating pull requests (GitHub)
and merge requests (GitLab) with templates, issue linking, and automation.
"""
import logging
import re
import subprocess
from datetime import datetime
from pathlib import Path
from typing import Dict, Any, Optional, List
logger = logging.getLogger(__name__)
class PRServiceError(Exception):
"""Base exception for PR service errors."""
pass
class PRCreationError(PRServiceError):
"""Exception raised when PR/MR creation fails."""
def __init__(self, platform: str, command: str, stderr: str):
self.platform = platform
self.command = command
self.stderr = stderr
super().__init__(
f"{platform} PR/MR creation failed:\n" f"Command: {command}\n" f"Error: {stderr}"
)
class PRService:
"""Service for creating pull requests and merge requests.
Supports both GitHub (via gh CLI) and GitLab (via glab CLI).
"""
def __init__(self, working_directory: Optional[Path] = None):
"""Initialize PRService.
Args:
working_directory: Directory to execute commands in (default: current)
"""
self.working_directory = working_directory or Path.cwd()
def create_pull_request(
self,
title: str,
body: str,
base_branch: str,
head_ | [] | yusufkaraaslan/lazy-bird | lazy_bird/services/pr_service.py |
"""Log publisher service for real-time log streaming via Redis Pub/Sub.
This module provides the LogPublisher class for publishing task execution logs
to Redis channels, enabling real-time log streaming to frontends via SSE.
"""
import json
import logging
from datetime import datetime, timezone
from typing import Dict, Any, Optional
from redis.exceptions import ConnectionError, RedisError
from lazy_bird.core.redis import get_async_redis, get_redis
logger = logging.getLogger(__name__)
class LogPublisher:
    """Service for publishing logs to Redis Pub/Sub channels.

    Publishes log messages to Redis channels for real-time streaming.
    Supports both synchronous and asynchronous publishing.

    Channel naming convention:
        - Task logs: `lazy_bird:logs:task:{task_id}`
        - Project logs: `lazy_bird:logs:project:{project_id}`
        - System logs: `lazy_bird:logs:system`
    """

    # Channel prefix
    CHANNEL_PREFIX = "lazy_bird:logs"
    # Log retention (seconds) - history keys expire after 1 hour by default
    DEFAULT_LOG_TTL = 3600

    def __init__(self, use_async: bool = False):
        """Initialize LogPublisher.

        Args:
            use_async: Use async Redis client (default: False). When True,
                only the *_async methods can be used; the sync methods will
                fail fast with an explanatory error.
        """
        self.use_async = use_async
        # Lazily-created sync client; async clients are fetched per call.
        self._redis = None

    def _get_client(self):
        """Get the sync Redis client, creating it lazily.

        Returns:
            redis.Redis or None: Sync Redis client, or None when this
            publisher was configured with use_async=True (async clients
            must be awaited, so they are obtained inside async methods).
        """
        if self._redis is None:
            if self.use_async:
                # Async client needs to be awaited - created in async methods
                return None
            else:
                self._redis = get_redis()
        return self._redis

    def publish_log(
        self,
        message: str,
        level: str = "INFO",
        task_id: Optional[str] = None,
        project_id: Optional[str] = None,
        metadata: Optional[Dict[str, Any]] = None,
    ) -> bool:
        """Publish log message to Redis channel (synchronous).

        Args:
            message: Log message
            level: Log level (DEBUG, INFO, WARNING, ERROR, CRITICAL)
            task_id: Task ID (if task-specific log)
            project_id: Project ID (if project-specific log)
            metadata: Additional metadata to include

        Returns:
            bool: True if published successfully, False otherwise

        Example:
            >>> publisher = LogPublisher()
            >>> publisher.publish_log(
            ...     message="Task started",
            ...     level="INFO",
            ...     task_id="123e4567-e89b-12d3-a456-426614174000",
            ...     project_id="my-project",
            ... )
            True
        """
        try:
            client = self._get_client()
            if client is None:
                # BUGFIX: the previous "Redis client not initialized" message
                # was misleading - this branch only happens when the publisher
                # was constructed for async use.
                logger.error(
                    "Sync Redis client unavailable (LogPublisher created with "
                    "use_async=True); use publish_log_async() instead"
                )
                return False
            # Build log entry
            log_entry = self._build_log_entry(
                message=message,
                level=level,
                task_id=task_id,
                project_id=project_id,
                metadata=metadata,
            )
            # Determine channel
            channel = self._get_channel(task_id=task_id, project_id=project_id)
            # Publish to Redis
            log_json = json.dumps(log_entry)
            subscribers = client.publish(channel, log_json)
            logger.debug(
                f"Published log to {channel}",
                extra={
                    "extra_fields": {
                        "channel": channel,
                        "subscribers": subscribers,
                        "level": level,
                    }
                },
            )
            # Also store in a list for history (with TTL)
            self._store_log_history(client, channel, log_json)
            return True
        except (ConnectionError, RedisError) as e:
            logger.error(
                f"Failed to publish log: {e}",
                extra={"extra_fields": {"error": str(e)}},
            )
            return False

    async def publish_log_async(
        self,
        message: str,
        level: str = "INFO",
        task_id: Optional[str] = None,
        project_id: Optional[str] = None,
        metadata: Optional[Dict[str, Any]] = None,
    ) -> bool:
        """Publish log message to Redis channel (asynchronous).

        Args:
            message: Log message
            level: Log level (DEBUG, INFO, WARNING, ERROR, CRITICAL)
            task_id: Task ID (if task-specific log)
            project_id: Project ID (if project-specific log)
            metadata: Additional metadata to include

        Returns:
            bool: True if published successfully, False otherwise

        Example:
            >>> publisher = LogPublisher(use_async=True)
            >>> await publisher.publish_log_async(
            ...     message="Task started",
            ...     level="INFO",
            ...     task_id="123e4567-e89b-12d3-a456-426614174000",
            ... )
            True
        """
        try:
            client = await get_async_redis()
            # Build log entry
            log_entry = self._build_log_entry(
                message=message,
                level=level,
                task_id=task_id,
                project_id=project_id,
                metadata=metadata,
            )
            # Determine channel
            channel = self._get_channel(task_id=task_id, project_id=project_id)
            # Publish to Redis
            log_json = json.dumps(log_entry)
            subscribers = await client.publish(channel, log_json)
            logger.debug(
                f"Published log to {channel}",
                extra={
                    "extra_fields": {
                        "channel": channel,
                        "subscribers": subscribers,
                        "level": level,
                    }
                },
            )
            # Also store in a list for history (with TTL)
            await self._store_log_history_async(client, channel, log_json)
            return True
        except (ConnectionError, RedisError) as e:
            logger.error(
                f"Failed to publish log: {e}",
                extra={"extra_fields": {"error": str(e)}},
            )
            return False

    def _build_log_entry(
        self,
        message: str,
        level: str,
        task_id: Optional[str],
        project_id: Optional[str],
        metadata: Optional[Dict[str, Any]],
    ) -> Dict[str, Any]:
        """Build structured log entry.

        Args:
            message: Log message
            level: Log level (normalized to upper case in the entry)
            task_id: Task ID (optional; omitted from the entry when falsy)
            project_id: Project ID (optional; omitted when falsy)
            metadata: Additional metadata (optional; omitted when falsy)

        Returns:
            dict: Structured log entry with a UTC ISO-8601 timestamp
        """
        entry = {
            "timestamp": datetime.now(timezone.utc).isoformat(),
            "level": level.upper(),
            "message": message,
        }
        if task_id:
            entry["task_id"] = task_id
        if project_id:
            entry["project_id"] = project_id
        if metadata:
            entry["metadata"] = metadata
        return entry

    def _get_channel(self, task_id: Optional[str] = None, project_id: Optional[str] = None) -> str:
        """Get Redis channel name based on context.

        Args:
            task_id: Task ID (optional)
            project_id: Project ID (optional)

        Returns:
            str: Channel name

        Channel priority:
            1. Task-specific: lazy_bird:logs:task:{task_id}
            2. Project-specific: lazy_bird:logs:project:{project_id}
            3. System-wide: lazy_bird:logs:system
        """
        if task_id:
            return f"{self.CHANNEL_PREFIX}:task:{task_id}"
        elif project_id:
            return f"{self.CHANNEL_PREFIX}:project:{project_id}"
        else:
            return f"{self.CHANNEL_PREFIX}:system"

    def _store_log_history(self, client, channel: str, log_json: str) -> None:
        """Store log in history list with TTL (synchronous).

        History failures are logged and swallowed - the pub/sub publish
        already succeeded, so the log was delivered to live subscribers.

        Args:
            client: Redis client
            channel: Channel name
            log_json: JSON-encoded log entry
        """
        try:
            # Store in a list for history
            history_key = f"{channel}:history"
            client.lpush(history_key, log_json)
            # Limit list to last 1000 entries
            client.ltrim(history_key, 0, 999)
            # Set expiration on the history key
            client.expire(history_key, self.DEFAULT_LOG_TTL)
        except RedisError as e:
            logger.warning(f"Failed to store log history: {e}")

    async def _store_log_history_async(self, client, channel: str, log_json: str) -> None:
        """Store log in history list with TTL (asynchronous).

        History failures are logged and swallowed - the pub/sub publish
        already succeeded, so the log was delivered to live subscribers.

        Args:
            client: Async Redis client
            channel: Channel name
            log_json: JSON-encoded log entry
        """
        try:
            # Store in a list for history
            history_key = f"{channel}:history"
            await client.lpush(history_key, log_json)
            # Limit list to last 1000 entries
            await client.ltrim(history_key, 0, 999)
            # Set expiration on the history key
            await client.expire(history_key, self.DEFAULT_LOG_TTL)
        except RedisError as e:
            logger.warning(f"Failed to store log history: {e}")

    def get_log_history(
        self,
        task_id: Optional[str] = None,
        project_id: Optional[str] = None,
        limit: int = 100,
    ) -> list:
        """Get log history from Redis (synchronous).

        Args:
            task_id: Task ID (optional)
            project_id: Project ID (optional)
            limit: Maximum number of logs to retrieve (default: 100)

        Returns:
            list: List of log entries (most recent first); empty on error

        Example:
            >>> publisher = LogPublisher()
            >>> logs = publisher.get_log_history(task_id="task-123", limit=50)
            >>> for log in logs:
            ...     print(log["message"])
        """
        try:
            client = self._get_client()
            if client is None:
                # BUGFIX: previously this returned [] silently, hiding a
                # misconfiguration (sync call on an async-only publisher).
                logger.error(
                    "Sync Redis client unavailable (LogPublisher created with "
                    "use_async=True); use get_log_history_async() instead"
                )
                return []
            channel = self._get_channel(task_id=task_id, project_id=project_id)
            history_key = f"{channel}:history"
            # Get logs from history list
            log_entries = client.lrange(history_key, 0, limit - 1)
            # Parse JSON entries
            return [json.loads(entry) for entry in log_entries]
        except (ConnectionError, RedisError) as e:
            logger.error(f"Failed to get log history: {e}")
            return []

    async def get_log_history_async(
        self,
        task_id: Optional[str] = None,
        project_id: Optional[str] = None,
        limit: int = 100,
    ) -> list:
        """Get log history from Redis (asynchronous).

        Args:
            task_id: Task ID (optional)
            project_id: Project ID (optional)
            limit: Maximum number of logs to retrieve (default: 100)

        Returns:
            list: List of log entries (most recent first); empty on error

        Example:
            >>> publisher = LogPublisher(use_async=True)
            >>> logs = await publisher.get_log_history_async(task_id="task-123")
            >>> for log in logs:
            ...     print(log["message"])
        """
        try:
            client = await get_async_redis()
            channel = self._get_channel(task_id=task_id, project_id=project_id)
            history_key = f"{channel}:history"
            # Get logs from history list
            log_entries = await client.lrange(history_key, 0, limit - 1)
            # Parse JSON entries
            return [json.loads(entry) for entry in log_entries]
        except (ConnectionError, RedisError) as e:
            logger.error(f"Failed to get log history: {e}")
            return []
# Convenience function for publishing logs
def publish_task_log(
    message: str,
    task_id: str,
    level: str = "INFO",
    project_id: Optional[str] = None,
    **metadata,
) -> bool:
    """Publish a log entry scoped to a single task.

    Thin wrapper that creates a short-lived :class:`LogPublisher` and
    forwards any extra keyword arguments as the entry's metadata.

    Args:
        message: Log message
        task_id: Task ID
        level: Log level (default: INFO)
        project_id: Project ID (optional)
        **metadata: Additional metadata as keyword arguments

    Returns:
        bool: True if published successfully

    Example:
        >>> publish_task_log(
        ...     "Running tests",
        ...     task_id="task-123",
        ...     level="INFO",
        ...     test_framework="pytest"
        ... )
        True
    """
    extra = metadata or None
    return LogPublisher().publish_log(
        message=message,
        level=level,
        task_id=task_id,
        project_id=project_id,
        metadata=extra,
    )
# Public names exported by this module.
__all__ = [
    "LogPublisher",
    "publish_task_log",
]
| """Log publisher service for real-time log streaming via Redis Pub/Sub.
This module provides the LogPublisher class for publishing task execution logs
to Redis channels, enabling real-time log streaming to frontends via SSE.
"""
import json
import logging
from datetime import datetime, timezone
from typing import Dict, Any, Optional
from redis.exceptions import ConnectionError, RedisError
from lazy_bird.core.redis import get_async_redis, get_redis
logger = logging.getLogger(__name__)
class LogPublisher:
"""Service for publishing logs to Redis Pub/Sub channels.
Publishes log messages to Redis channels for real-time streaming.
Supports both synchronous and asynchronous publishing.
Channel naming convention:
- Task logs: `lazy_bird:logs:task:{task_id}`
- Project logs: `lazy_bird:logs:project:{project_id}`
- System logs: `lazy_bird:logs:system`
"""
# Channel prefix
CHANNEL_PREFIX = "lazy_bird:logs"
# Log retention (seconds) - logs expire after 1 hour by default
DEFAULT_LOG_TTL = 3600
def __init__(self, use_async: bool = False):
"""Initialize LogPublisher.
Args:
use_async: Use async Redis client (default: False)
"""
self.use_async = use_async
self._redis = None
def _get_client(self):
"""Get Redis client (sync or async).
Returns:
redis.Redis or AsyncRedis: Redis client
| [
"# redis/redis-py:redis/exceptions.py\nConnectionError",
"# yusufkaraaslan/lazy-bird:lazy_bird/core/redis.py\nget_async_redis"
] | yusufkaraaslan/lazy-bird | lazy_bird/services/log_publisher.py |
"""Git service for worktree and repository operations.
This module provides git operations for Lazy-Bird task execution:
- Creating isolated git worktrees for task execution
- Committing changes made by Claude
- Pushing branches to remote
- Cleaning up worktrees after task completion
All operations use subprocess to run git commands and include
comprehensive error handling and logging.
"""
import os
import shutil
import subprocess
from pathlib import Path
from typing import Dict, List, Optional, Tuple
from lazy_bird.core.config import settings
from lazy_bird.core.logging import get_logger
logger = get_logger(__name__)
class GitServiceError(Exception):
    """Common base class for all GitService failures."""
class WorktreeExistsError(GitServiceError):
    """Raised when a worktree already exists at the target path."""
class GitCommandError(GitServiceError):
    """Raised when an invoked git command exits with a non-zero status."""

    def __init__(self, command: str, return_code: int, stderr: str):
        # Expose the failing command's details for programmatic inspection.
        self.stderr = stderr
        self.return_code = return_code
        self.command = command
        message = f"Git command failed (exit {return_code}): {command}\n{stderr}"
        super().__init__(message)
class GitService:
"""Service for git repository and worktree operations.
This service handles all git operations required for task execution:
- Worktree creation and cleanup
- Branch management
- Committing changes
- Pushing to remote
Attributes:
project_path: Path to the project repository
worktree_base: Base directory for worktrees (from settings.WORKTREE_BASE_PATH)
git_user_name: Git user name for commits
git_user_email: Git user email for commits
Example:
>>> service = GitService("/path/to/project")
>>> worktree_path = service.create_worktree(
... project_id="my-project",
... task_id=42
... )
>>> service.commit_changes(
... worktree_path=worktree_path,
... message="Task #42: Add feature"
... )
>>> service.push_branch(worktree_path)
>>> service.cleanup_worktree(worktree_path)
"""
def __init__(
self,
project_path: str,
worktree_base: Optional[str] = None,
git_user_name: Optional[str] = None,
git_user_email: Optional[str] = None,
):
"""Initialize GitService.
Args:
project_path: Path to the main project repository
worktree_base: Base directory for worktrees (default: from settings)
git_user_name: Git user name for commits (default: from settings)
git_user_email: Git user email for commits (default: from settings)
"""
self.project_path = Path(project_path).resolve()
self.worktree_base = Path(worktree_base or settings.WORKTREE_BASE_PATH)
self.git_user_name = git_user_name or settings.GIT_USER_NAME
self.git_user_email = git_user_email or settings.GIT_USER_EMAIL
# Ensure worktree base directory exists
self.worktree_base.mkdir(parents=True, exist_ok=True)
logger.debug(
f"GitService initialized: project={self.project_path}, "
f"worktree_base={self.worktree_base}"
)
def _run_git(
self,
args: List[str],
cwd: Optional[Path] = None,
check: bool = True,
capture_output: bool = True,
) -> subprocess.CompletedProcess:
"""Run a git command.
Args:
args: Git command arguments (without 'git' prefix)
cwd: Working directory for command (default: project_path)
check: Raise exception on non-zero exit code
capture_output: Capture stdout/stderr
Returns:
CompletedProcess instance
Raises:
GitCommandError: If command fails and check=True
"""
cmd = ["git"] + args
cwd = cwd or self.project_path
logger.debug(f"Running git command: {' '.join(cmd)} (cwd={cwd})")
try:
result = subprocess.run(
cmd,
cwd=str(cwd),
capture_output=capture_output,
text=True,
check=False, # We'll check manually for better error messages
)
if check and result.returncode != 0:
raise GitCommandError(
command=" ".join(cmd),
return_code=result.returncode,
stderr=result.stderr.strip() if result.stderr else "",
)
return result
except FileNotFoundError:
raise GitServiceError("git command not found - is git installed?")
except Exception as e:
if isinstance(e, GitCommandError):
raise
raise GitServiceError(f"Failed to execute git command: {str(e)}") from e
def _get_base_branch(self) -> str:
    """Detect the repository's base branch ('main' or 'master').

    Probes refs in priority order: remote-tracking origin/main, then
    origin/master, then the local heads — same order as a fresh clone
    would prefer.

    Returns:
        Name of the base branch

    Raises:
        GitServiceError: If neither main nor master exists anywhere
    """
    candidates = (
        ("refs/remotes/origin/main", "main"),
        ("refs/remotes/origin/master", "master"),
        ("refs/heads/main", "main"),
        ("refs/heads/master", "master"),
    )
    for ref, branch in candidates:
        # show-ref --verify exits 0 iff the exact ref exists
        probe = self._run_git(
            ["show-ref", "--verify", "--quiet", ref],
            check=False,
        )
        if probe.returncode == 0:
            return branch
    raise GitServiceError("Could not determine base branch - neither main nor master found")
def create_worktree(
    self,
    project_id: str,
    task_id: int,
    base_branch: Optional[str] = None,
    force: bool = False,
) -> Tuple[Path, str]:
    """Create a git worktree for task execution.

    Creates an isolated git worktree with a feature branch for the task.
    If a worktree already exists at the target path, it can be forcefully
    recreated if force=True.

    Args:
        project_id: Project identifier (e.g., "my-project")
        task_id: Task/issue number
        base_branch: Base branch to branch from (default: auto-detect main/master)
        force: Force recreation if worktree exists

    Returns:
        Tuple of (worktree_path, branch_name)

    Raises:
        WorktreeExistsError: If worktree exists and force=False
        GitServiceError: If git operations fail

    Example:
        >>> worktree_path, branch_name = service.create_worktree("proj", 42)
        >>> print(worktree_path)
        /tmp/lazy-bird-worktrees/lazy-bird-agent-proj-42
        >>> print(branch_name)
        feature-proj-42
    """
    # Deterministic naming: the same (project, task) pair always maps to
    # the same branch and worktree path, so reruns are idempotent.
    branch_name = f"feature-{project_id}-{task_id}"
    worktree_path = self.worktree_base / f"lazy-bird-agent-{project_id}-{task_id}"
    logger.info(
        f"[{project_id}] Creating worktree: {worktree_path}",
        extra={
            "extra_fields": {
                "project_id": project_id,
                "task_id": task_id,
                "branch_name": branch_name,
                "worktree_path": str(worktree_path),
            }
        },
    )
    # Check if worktree already exists
    if worktree_path.exists():
        if force:
            logger.warning(f"[{project_id}] Worktree exists, forcing removal: {worktree_path}")
            self.cleanup_worktree(worktree_path, branch_name)
        else:
            raise WorktreeExistsError(
                f"Worktree already exists: {worktree_path}. Use force=True to recreate."
            )
    # Check if branch exists (stale leftover from a previous run) and delete it
    result = self._run_git(
        ["show-ref", "--verify", "--quiet", f"refs/heads/{branch_name}"],
        check=False,
    )
    if result.returncode == 0:
        logger.warning(f"[{project_id}] Branch exists, deleting: {branch_name}")
        self._run_git(["branch", "-D", branch_name], check=False)
    # Fetch latest from remote; non-fatal so offline/detached repos still work
    logger.info(f"[{project_id}] Fetching latest from remote...")
    try:
        self._run_git(["fetch", "origin"])
        logger.debug(f"[{project_id}] Fetched latest from origin")
    except GitCommandError as e:
        logger.warning(f"[{project_id}] Failed to fetch from origin: {e.stderr}")
    # Determine base branch
    if not base_branch:
        base_branch = self._get_base_branch()
        logger.debug(f"[{project_id}] Using base branch: {base_branch}")
    # Create worktree with new branch off the remote-tracking base
    logger.debug(f"[{project_id}] Creating worktree from origin/{base_branch}")
    self._run_git(
        [
            "worktree",
            "add",
            "-b",
            branch_name,
            str(worktree_path),
            f"origin/{base_branch}",
        ]
    )
    logger.info(
        f"[{project_id}] Worktree created successfully",
        extra={
            "extra_fields": {
                "worktree_path": str(worktree_path),
                "branch_name": branch_name,
            }
        },
    )
    return worktree_path, branch_name
def commit_changes(
    self,
    worktree_path: Path,
    message: str,
    project_id: Optional[str] = None,
    allow_empty: bool = False,
) -> str:
    """Commit all changes in the worktree.

    Stages all changes (including deletions) and creates a commit authored
    by the service's configured git identity.

    Args:
        worktree_path: Path to the worktree
        message: Commit message
        project_id: Project ID for logging (optional)
        allow_empty: Allow empty commits

    Returns:
        str: Commit hash (short SHA)

    Raises:
        GitCommandError: If the commit fails

    Example:
        >>> commit_hash = service.commit_changes(
        ...     worktree_path=Path("/tmp/worktree"),
        ...     message="Task #42: Add feature\\n\\nImplemented by Lazy-Bird"
        ... )
        >>> print(commit_hash)
        abc1234
    """
    prefix = f"[{project_id}] " if project_id else ""
    logger.info(
        f"{prefix}Committing changes in {worktree_path}",
        extra={
            "extra_fields": {
                "worktree_path": str(worktree_path),
                "project_id": project_id,
            }
        },
    )
    # Stage all changes (additions, modifications, deletions)
    self._run_git(["add", "-A"], cwd=worktree_path)
    logger.debug(f"{prefix}Staged all changes")
    # Build the commit command
    cmd_args = ["commit", "-m", message]
    if allow_empty:
        cmd_args.append("--allow-empty")
    # Run the commit via subprocess directly (not _run_git) so the author/
    # committer identity can be injected through the environment.
    env = os.environ.copy()
    env["GIT_AUTHOR_NAME"] = self.git_user_name
    env["GIT_AUTHOR_EMAIL"] = self.git_user_email
    env["GIT_COMMITTER_NAME"] = self.git_user_name
    env["GIT_COMMITTER_EMAIL"] = self.git_user_email
    try:
        # Fix: the CompletedProcess returned here was previously bound to an
        # unused local that was immediately shadowed; drop the dead binding.
        subprocess.run(
            ["git"] + cmd_args,
            cwd=str(worktree_path),
            env=env,
            capture_output=True,
            text=True,
            check=True,
        )
    except subprocess.CalledProcessError as e:
        raise GitCommandError(
            command=" ".join(["git"] + cmd_args),
            return_code=e.returncode,
            stderr=e.stderr.strip() if e.stderr else "",
        )
    # Resolve the short SHA of the commit just created
    result = self._run_git(["rev-parse", "--short", "HEAD"], cwd=worktree_path)
    commit_hash = result.stdout.strip()
    logger.info(
        f"{prefix}Committed changes: {commit_hash}",
        extra={"extra_fields": {"commit_hash": commit_hash}},
    )
    return commit_hash
def push_branch(
    self,
    worktree_path: Path,
    remote: str = "origin",
    force: bool = False,
    project_id: Optional[str] = None,
) -> None:
    """Push the worktree's current branch to a remote repository.

    Args:
        worktree_path: Path to the worktree
        remote: Remote name (default: "origin")
        force: Use --force-with-lease for force push
        project_id: Project ID for logging (optional)

    Raises:
        GitServiceError: If push fails

    Example:
        >>> service.push_branch(Path("/tmp/worktree"))
    """
    tag = f"[{project_id}] " if project_id else ""
    logger.info(
        f"{tag}Pushing branch to {remote}",
        extra={
            "extra_fields": {
                "worktree_path": str(worktree_path),
                "remote": remote,
                "force": force,
            }
        },
    )
    # Resolve the branch currently checked out in the worktree
    head = self._run_git(["rev-parse", "--abbrev-ref", "HEAD"], cwd=worktree_path)
    branch = head.stdout.strip()
    # Assemble push arguments; --force-with-lease is the safe force variant
    push_args = ["push", "-u"]
    if force:
        push_args.append("--force-with-lease")
    push_args.extend([remote, branch])
    self._run_git(push_args, cwd=worktree_path)
    logger.info(
        f"{tag}Pushed branch {branch} to {remote}",
        extra={"extra_fields": {"branch_name": branch}},
    )
def cleanup_worktree(
    self,
    worktree_path: Path,
    branch_name: Optional[str] = None,
    project_id: Optional[str] = None,
) -> None:
    """Clean up a git worktree and associated branch.

    Removes the worktree (falling back to a plain directory delete when git
    refuses or fails), prunes stale worktree records, and optionally deletes
    the local branch. Best-effort: individual failures are logged, not raised.

    Args:
        worktree_path: Path to the worktree to remove
        branch_name: Branch name to delete (if None, won't delete branch)
        project_id: Project ID for logging (optional)

    Example:
        >>> service.cleanup_worktree(
        ...     Path("/tmp/worktree"),
        ...     "feature-proj-42"
        ... )
    """
    prefix = f"[{project_id}] " if project_id else ""
    logger.info(
        f"{prefix}Cleaning up worktree: {worktree_path}",
        extra={
            "extra_fields": {
                "worktree_path": str(worktree_path),
                "branch_name": branch_name,
            }
        },
    )
    # Remove worktree
    if worktree_path.exists():
        try:
            result = self._run_git(
                ["worktree", "remove", str(worktree_path), "--force"],
                check=False,
            )
            # Bug fix: with check=False _run_git never raises on a non-zero
            # exit, so the old except-based rm -rf fallback was unreachable.
            # Inspect the return code explicitly instead.
            if result.returncode == 0:
                logger.debug(f"{prefix}Removed worktree via git")
            else:
                logger.warning(
                    f"{prefix}git worktree remove failed, using rm -rf: "
                    f"{result.stderr.strip() if result.stderr else 'unknown error'}"
                )
                shutil.rmtree(worktree_path, ignore_errors=True)
        except Exception as e:
            # _run_git can still raise GitServiceError (e.g. git missing)
            logger.warning(f"{prefix}git worktree remove failed, using rm -rf: {e}")
            shutil.rmtree(worktree_path, ignore_errors=True)
    # Prune worktree list so git forgets the removed entry
    try:
        self._run_git(["worktree", "prune"], check=False)
    except Exception as e:
        logger.warning(f"{prefix}Failed to prune worktrees: {e}")
    # Delete local branch if specified
    if branch_name:
        try:
            self._run_git(["branch", "-D", branch_name], check=False)
            logger.debug(f"{prefix}Deleted local branch: {branch_name}")
        except Exception as e:
            logger.warning(f"{prefix}Failed to delete branch: {e}")
    logger.info(
        f"{prefix}Worktree cleanup complete",
        extra={"extra_fields": {"worktree_path": str(worktree_path)}},
    )
def get_diff_stats(self, worktree_path: Path) -> Dict[str, int]:
    """Get statistics about staged changes in the worktree.

    Runs ``git diff --stat --cached`` and parses the trailing summary line.

    Args:
        worktree_path: Path to the worktree

    Returns:
        dict: Statistics with keys:
            - files_changed: Number of files changed
            - insertions: Number of lines inserted
            - deletions: Number of lines deleted
        All zeros when nothing is staged.

    Example:
        >>> stats = service.get_diff_stats(Path("/tmp/worktree"))
        >>> print(stats)
        {"files_changed": 5, "insertions": 120, "deletions": 30}
    """
    # Local import kept (re is not used elsewhere in this module), but
    # hoisted to the top of the function instead of mid-body.
    import re

    result = self._run_git(["diff", "--stat", "--cached"], cwd=worktree_path)
    lines = result.stdout.strip().split("\n")
    stats = {"files_changed": 0, "insertions": 0, "deletions": 0}
    # A stat listing ends with a summary such as
    # "5 files changed, 120 insertions(+), 30 deletions(-)"; fewer than
    # two lines means there were no staged changes to report.
    if not lines or len(lines) < 2:
        return stats
    summary = lines[-1]
    # Single table of patterns replaces three copy-pasted match blocks
    patterns = {
        "files_changed": r"(\d+) files? changed",
        "insertions": r"(\d+) insertions?",
        "deletions": r"(\d+) deletions?",
    }
    for key, pattern in patterns.items():
        if match := re.search(pattern, summary):
            stats[key] = int(match.group(1))
    return stats
# Public API of this module: the service class plus its exception hierarchy.
__all__ = [
    "GitService",
    "GitServiceError",
    "WorktreeExistsError",
    "GitCommandError",
]
| """Git service for worktree and repository operations.
This module provides git operations for Lazy-Bird task execution:
- Creating isolated git worktrees for task execution
- Committing changes made by Claude
- Pushing branches to remote
- Cleaning up worktrees after task completion
All operations use subprocess to run git commands and include
comprehensive error handling and logging.
"""
import os
import shutil
import subprocess
from pathlib import Path
from typing import Dict, List, Optional, Tuple
from lazy_bird.core.config import settings
from lazy_bird.core.logging import get_logger
logger = get_logger(__name__)
class GitServiceError(Exception):
    """Root of the GitService exception hierarchy."""

    pass


class WorktreeExistsError(GitServiceError):
    """Raised when a worktree already exists at the target path."""

    pass


class GitCommandError(GitServiceError):
    """Raised when a git subprocess exits with a non-zero status.

    Attributes:
        command: Full command line that was executed
        return_code: Process exit code
        stderr: Captured standard error output
    """

    def __init__(self, command: str, return_code: int, stderr: str):
        self.stderr = stderr
        self.return_code = return_code
        self.command = command
        message = f"Git command failed (exit {return_code}): {command}\n{stderr}"
        super().__init__(message)
class GitService:
"""Service for git repository and worktree operations.
This service handles all git operations required for task execution:
- Worktree creation and cleanup
- Branch management
- Committing changes
- Pushing to remote
Attributes:
project_path: Path to the project repository
worktree_base: Base directory for worktrees (from settings.WORKTREE_BASE_PATH)
git_user_name: Git user name for commits
git_user_email: Git | [
"# yusufkaraaslan/lazy-bird:lazy_bird/core/logging.py\nget_logger"
] | yusufkaraaslan/lazy-bird | lazy_bird/services/git_service.py |
"""Claude Code CLI service.
This module provides the ClaudeService class for executing Claude Code CLI
commands, parsing output, tracking token usage, and handling errors.
"""
import json
import logging
import re
import subprocess
from datetime import datetime, timezone
from pathlib import Path
from typing import Dict, Any, Optional, List, TYPE_CHECKING
if TYPE_CHECKING:
from lazy_bird.core.config import Settings
logger = logging.getLogger(__name__)
def _get_settings():
    """Return the application settings object, imported lazily.

    The deferred import avoids a circular dependency between this module
    and lazy_bird.core.config at import time.
    """
    from lazy_bird.core import config

    return config.settings
class ClaudeServiceError(Exception):
    """Root of the Claude service exception hierarchy."""

    pass


class ClaudeExecutionError(ClaudeServiceError):
    """Raised when the Claude Code CLI exits with a failure status.

    Attributes:
        command: Command line that was executed
        return_code: Process exit code
        stderr: Captured standard error
        stdout: Captured standard output (may be empty)
    """

    def __init__(self, command: str, return_code: int, stderr: str, stdout: str = ""):
        self.stdout = stdout
        self.stderr = stderr
        self.return_code = return_code
        self.command = command
        message = (
            f"Claude execution failed with code {return_code}:\n"
            f"Command: {command}\n"
            f"Error: {stderr}"
        )
        super().__init__(message)


class ClaudeTimeoutError(ClaudeServiceError):
    """Raised when a Claude CLI run exceeds its timeout."""

    pass
class ClaudeService:
"""Service for executing Claude Code CLI commands.
Handles Claude Code CLI execution, output parsing, token tracking,
and cost calculation.
Attributes:
api_key: Claude API key from settings
model: Claude model to use (default from settings)
max_tokens: Maximum tokens per request
temperature: Temperature setting for Claude
timeout: Execution timeout in seconds
"""
def __init__(
    self,
    api_key: Optional[str] = None,
    model: Optional[str] = None,
    max_tokens: Optional[int] = None,
    temperature: Optional[float] = None,
    timeout: Optional[int] = None,
):
    """Initialize ClaudeService.

    Args:
        api_key: Claude API key (defaults to settings.CLAUDE_API_KEY)
        model: Model name (defaults to settings.CLAUDE_MODEL)
        max_tokens: Max tokens (defaults to settings.CLAUDE_MAX_TOKENS)
        temperature: Temperature (defaults to settings.CLAUDE_TEMPERATURE)
        timeout: Timeout in seconds (defaults to 600)

    Raises:
        ClaudeServiceError: If no API key is configured anywhere
    """
    # Only load settings if any param is not provided; the lazy import in
    # _get_settings avoids the config<->service circular dependency.
    # NOTE(review): `all([...])` treats max_tokens=0 as "missing" — presumably
    # intentional since 0 is not a usable limit; confirm if 0 must be allowed.
    if not all([api_key, model, max_tokens, temperature is not None]):
        settings = _get_settings()
        self.api_key = api_key or settings.CLAUDE_API_KEY
        self.model = model or settings.CLAUDE_MODEL
        self.max_tokens = max_tokens or settings.CLAUDE_MAX_TOKENS
        self.temperature = (
            temperature if temperature is not None else settings.CLAUDE_TEMPERATURE
        )
    else:
        # All values supplied explicitly; never touch settings
        self.api_key = api_key
        self.model = model
        self.max_tokens = max_tokens
        self.temperature = temperature
    self.timeout = timeout or 600  # Default 10 minutes
    if not self.api_key:
        raise ClaudeServiceError("Claude API key not configured")
def execute_claude(
    self,
    prompt: str,
    working_directory: Path,
    project_id: Optional[str] = None,
    task_id: Optional[int] = None,
    error_context: Optional[str] = None,
    log_file: Optional[Path] = None,
) -> Dict[str, Any]:
    """Execute Claude Code CLI with the given prompt.

    Args:
        prompt: Task prompt for Claude
        working_directory: Directory to execute Claude in
        project_id: Project identifier for logging
        task_id: Task ID for logging
        error_context: Previous error context for retry attempts
        log_file: Path to log file for Claude output

    Returns:
        dict: Execution result
            - success: bool
            - output: str (Claude's output)
            - error: str (error message if failed)
            - tokens_used: int
            - cost: float
            - execution_time: float (seconds)
            - log_file: str (path to log file)

    Raises:
        ClaudeTimeoutError: If execution times out
        ClaudeServiceError: On any other unexpected failure
    """
    start_time = datetime.now(timezone.utc)
    # Build full prompt with error context if retry
    full_prompt = self._build_prompt(prompt, error_context)
    # Determine log file path when the caller did not provide one
    if log_file is None:
        # Use /tmp/lazy-bird-logs as default log directory
        log_dir = Path("/tmp/lazy-bird-logs") / "claude"
        log_dir.mkdir(parents=True, exist_ok=True)
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        prefix = f"{project_id}-{task_id}" if project_id and task_id else "claude"
        log_file = log_dir / f"{prefix}_{timestamp}.log"
    logger.info(
        f"Executing Claude Code CLI",
        extra={
            "extra_fields": {
                "project_id": project_id,
                "task_id": task_id,
                "working_directory": str(working_directory),
                "log_file": str(log_file),
                "has_error_context": error_context is not None,
            }
        },
    )
    # Build command
    command = self._build_command(full_prompt, working_directory)
    # Execute Claude
    try:
        # API key is injected via the environment rather than the command
        # line so it never appears in process listings or logs.
        result = subprocess.run(
            command,
            cwd=working_directory,
            capture_output=True,
            text=True,
            timeout=self.timeout,
            env={**subprocess.os.environ, "ANTHROPIC_API_KEY": self.api_key},
        )
        # Write output to log file (always, success or failure)
        self._write_log(log_file, command, result, start_time)
        # Parse output into the result dict documented above
        execution_result = self._parse_output(
            result, start_time, str(log_file), project_id, task_id
        )
        # Log result; getattr dispatches to logger.info or logger.error
        log_level = "info" if execution_result["success"] else "error"
        getattr(logger, log_level)(
            f"Claude execution {'succeeded' if execution_result['success'] else 'failed'}",
            extra={
                "extra_fields": {
                    "project_id": project_id,
                    "task_id": task_id,
                    "success": execution_result["success"],
                    "tokens_used": execution_result["tokens_used"],
                    "cost": execution_result["cost"],
                    "execution_time": execution_result["execution_time"],
                }
            },
        )
        return execution_result
    except subprocess.TimeoutExpired as e:
        error_msg = f"Claude execution timed out after {self.timeout}s"
        logger.error(
            error_msg,
            extra={
                "extra_fields": {
                    "project_id": project_id,
                    "task_id": task_id,
                    "timeout": self.timeout,
                }
            },
        )
        raise ClaudeTimeoutError(error_msg) from e
    except Exception as e:
        # Catch-all at the service boundary: log with traceback, re-wrap
        logger.error(
            f"Claude execution failed: {str(e)}",
            extra={
                "extra_fields": {
                    "project_id": project_id,
                    "task_id": task_id,
                    "error": str(e),
                }
            },
            exc_info=True,
        )
        raise ClaudeServiceError(f"Unexpected error: {str(e)}") from e
def _build_prompt(self, base_prompt: str, error_context: Optional[str] = None) -> str:
"""Build full prompt with optional error context.
Args:
base_prompt: Base task prompt
error_context: Previous error context for retry
Returns:
str: Full prompt
"""
if error_context:
return f"{base_prompt}\n\n## Previous Attempt Error\n\n{error_context}\n\nPlease fix the issues above and try again."
return base_prompt
def _build_command(self, prompt: str, working_directory: Path) -> List[str]:
"""Build Claude Code CLI command.
Args:
prompt: Full prompt
working_directory: Working directory
Returns:
list: Command as list of strings
"""
command = [
"claude",
"-p",
prompt,
"--model",
self.model,
"--max-tokens",
str(self.max_tokens),
"--temperature",
str(self.temperature),
]
# Add output format for easier parsing
command.extend(["--output-format", "json"])
return command
def _write_log(
self,
log_file: Path,
command: List[str],
result: subprocess.CompletedProcess,
start_time: datetime,
) -> None:
"""Write execution log to file.
Args:
log_file: Path to log file
command: Command that was executed
result: Subprocess result
start_time: Execution start time
"""
end_time = datetime.now(timezone.utc)
duration = (end_time - start_time).total_seconds()
with open(log_file, "w") as f:
f.write("=" * 80 + "\n")
f.write("Claude Code CLI Execution Log\n")
f.write("=" * 80 + "\n\n")
f.write(f"Start Time: {start_time.isoformat()}\n")
f.write(f"End Time: {end_time.isoformat()}\n")
f.write(f"Duration: {duration:.2f}s\n")
f.write(f"Return Code: {result.returncode}\n\n")
f.write("Command:\n")
f.write(" ".join(command) + "\n\n")
f.write("=" * 80 + "\n")
f.write("STDOUT:\n")
f.write("=" * 80 + "\n")
f.write(result.stdout + "\n\n")
f.write("=" * 80 + "\n")
f.write("STDERR:\n")
f.write("=" * 80 + "\n")
f.write(result.stderr + "\n")
def _parse_output(
self,
result: subprocess.CompletedProcess,
start_time: datetime,
log_file: str,
project_id: Optional[str] = None,
task_id: Optional[int] = None,
) -> Dict[str, Any]:
"""Parse Claude output and extract token usage.
Args:
result: Subprocess result
start_time: Execution start time
log_file: Path to log file
project_id: Project ID for logging
task_id: Task ID for logging
Returns:
dict: Parsed result with success, output, tokens, cost
"""
end_time = datetime.now(timezone.utc)
execution_time = (end_time - start_time).total_seconds()
# Check if execution succeeded
success = result.returncode == 0
# Try to parse JSON output
tokens_used = 0
output = result.stdout
try:
# Claude CLI may output JSON with usage stats
json_output = json.loads(result.stdout)
if "usage" in json_output:
tokens_used = json_output["usage"].get("total_tokens", 0)
if "output" in json_output:
output = json_output["output"]
except (json.JSONDecodeError, KeyError):
# Fallback: try to extract from plain text output
tokens_used = self._extract_tokens_from_text(result.stdout + result.stderr)
# Calculate cost (example rates, adjust based on actual pricing)
cost = self._calculate_cost(tokens_used)
return {
"success": success,
"output": output,
"error": result.stderr if not success else "",
"tokens_used": tokens_used,
"cost": cost,
"execution_time": execution_time,
"log_file": log_file,
"return_code": result.returncode,
}
def _extract_tokens_from_text(self, text: str) -> int:
"""Extract token count from plain text output.
Args:
text: Output text
Returns:
int: Token count (0 if not found)
"""
# Look for patterns like "tokens used: 1234" or "total tokens: 1234"
patterns = [
r"total[_\s]+tokens[:\s]+(\d+)",
r"tokens[_\s]+used[:\s]+(\d+)",
r"(\d+)[_\s]+tokens",
]
for pattern in patterns:
match = re.search(pattern, text, re.IGNORECASE)
if match:
return int(match.group(1))
return 0
def _calculate_cost(self, tokens: int) -> float:
"""Calculate cost based on token usage.
Args:
tokens: Number of tokens used
Returns:
float: Cost in USD
Note:
Pricing is approximate and should be updated based on actual API pricing.
Current rates (example):
- Input: $0.008 / 1K tokens
- Output: $0.024 / 1K tokens
- Using average of $0.016 / 1K tokens
"""
if tokens == 0:
return 0.0
# Average rate per 1K tokens
rate_per_1k = 0.016
return (tokens / 1000) * rate_per_1k
def construct_task_prompt(
    self,
    project_name: str,
    project_type: str,
    project_id: str,
    task_title: str,
    task_body: str,
    working_directory: Path,
) -> str:
    """Construct detailed task prompt for Claude.

    Args:
        project_name: Project name
        project_type: Project type (e.g., "godot", "python")
        project_id: Project identifier
        task_title: Task title
        task_body: Task description
        working_directory: Working directory path

    Returns:
        str: Formatted prompt
    """
    # The template lines are intentionally unindented: they sit inside a
    # triple-quoted f-string, so leading whitespace would leak into the prompt.
    prompt = f"""# Task for {project_name}
**Project Type:** {project_type}
**Project ID:** {project_id}
**Working Directory:** {working_directory}
## Task: {task_title}
{task_body}
## Important Instructions
- You are working in the directory: {working_directory}
- This is a git worktree - changes will be committed automatically
- **DO NOT** run git commit commands - the automation system handles all git operations
- Focus on implementing the task requirements
- Write tests if applicable
- Ensure code follows project conventions
- The task will be validated by automated tests
Please implement the requested changes."""
    return prompt
# Public API of this module: the service class plus its exception hierarchy.
__all__ = [
    "ClaudeService",
    "ClaudeServiceError",
    "ClaudeExecutionError",
    "ClaudeTimeoutError",
]
| """Claude Code CLI service.
This module provides the ClaudeService class for executing Claude Code CLI
commands, parsing output, tracking token usage, and handling errors.
"""
import json
import logging
import re
import subprocess
from datetime import datetime, timezone
from pathlib import Path
from typing import Dict, Any, Optional, List, TYPE_CHECKING
if TYPE_CHECKING:
from lazy_bird.core.config import Settings
logger = logging.getLogger(__name__)
def _get_settings():
"""Lazy import settings to avoid circular dependencies."""
from lazy_bird.core.config import settings
return settings
class ClaudeServiceError(Exception):
"""Base exception for Claude service errors."""
pass
class ClaudeExecutionError(ClaudeServiceError):
"""Exception raised when Claude Code CLI execution fails."""
def __init__(self, command: str, return_code: int, stderr: str, stdout: str = ""):
self.command = command
self.return_code = return_code
self.stderr = stderr
self.stdout = stdout
super().__init__(
f"Claude execution failed with code {return_code}:\n"
f"Command: {command}\n"
f"Error: {stderr}"
)
class ClaudeTimeoutError(ClaudeServiceError):
"""Exception raised when Claude execution times out."""
pass
class ClaudeService:
"""Service for executing Claude Code CLI commands.
Handles Claude Code CLI execution, output parsing, token tracking,
and cost calculation.
Attributes:
api_key: Claude API key from settings
| [] | yusufkaraaslan/lazy-bird | lazy_bird/services/claude_service.py |
"""Pydantic schemas for WebhookSubscription API endpoints."""
from datetime import datetime
from typing import Optional
from uuid import UUID
from pydantic import BaseModel, ConfigDict, Field, HttpUrl
class WebhookSubscriptionBase(BaseModel):
    """Base WebhookSubscription schema with shared fields.

    Shared by the create and response schemas; validation constraints
    (URL scheme, secret length, non-empty event list) live here.
    """

    url: HttpUrl = Field(
        ...,
        description="Webhook endpoint URL (must be http/https)",
        examples=["https://example.com/webhooks/lazy-bird"],
    )
    # Minimum 16 chars so the HMAC key has non-trivial entropy
    secret: str = Field(
        ...,
        min_length=16,
        max_length=255,
        description="Secret for HMAC signature verification",
    )
    project_id: Optional[UUID] = Field(
        default=None,
        description="Project ID (NULL for global subscriptions)",
    )
    events: list[str] = Field(
        ...,
        min_length=1,
        description="Array of event types to subscribe to",
        examples=[["task.completed", "task.failed", "pr.created"]],
    )
    is_active: bool = Field(
        default=True,
        description="Whether subscription is active",
    )
    description: Optional[str] = Field(
        default=None,
        description="Subscription description",
    )
class WebhookSubscriptionCreate(WebhookSubscriptionBase):
    """Schema for creating a new webhook subscription.

    Identical to the base schema; exists as a distinct type for API clarity.
    """

    pass
class WebhookSubscriptionUpdate(BaseModel):
    """Schema for updating an existing webhook subscription.

    All fields optional (partial update); unknown fields are rejected
    via extra="forbid".
    """

    url: Optional[HttpUrl] = Field(default=None)
    secret: Optional[str] = Field(default=None, min_length=16, max_length=255)
    events: Optional[list[str]] = Field(default=None, min_length=1)
    is_active: Optional[bool] = Field(default=None)
    description: Optional[str] = Field(default=None)
    model_config = ConfigDict(extra="forbid")
class WebhookSubscriptionResponse(WebhookSubscriptionBase):
    """Schema for webhook subscription API responses.

    Extends the base fields with server-managed delivery/failure metadata;
    from_attributes=True allows construction straight from ORM objects.
    """

    id: UUID = Field(..., description="Unique subscription identifier")
    last_triggered_at: Optional[datetime] = Field(
        default=None,
        description="Last webhook delivery time",
    )
    failure_count: int = Field(..., ge=0, description="Number of consecutive failures")
    last_failure_at: Optional[datetime] = Field(
        default=None,
        description="Last failure timestamp",
    )
    created_at: datetime = Field(..., description="Creation timestamp")
    updated_at: datetime = Field(..., description="Last update timestamp")
    model_config = ConfigDict(from_attributes=True)
class WebhookSubscriptionListResponse(BaseModel):
    """Schema for paginated webhook subscription list responses."""

    items: list[WebhookSubscriptionResponse] = Field(..., description="List of subscriptions")
    total: int = Field(..., ge=0, description="Total number of subscriptions")
    page: int = Field(..., ge=1, description="Current page number")
    # page_size capped at 100 to bound response size
    page_size: int = Field(..., ge=1, le=100, description="Items per page")
    pages: int = Field(..., ge=0, description="Total number of pages")
    model_config = ConfigDict(from_attributes=True)
| """Pydantic schemas for WebhookSubscription API endpoints."""
from datetime import datetime
from typing import Optional
from uuid import UUID
from pydantic import BaseModel, ConfigDict, Field, HttpUrl
class WebhookSubscriptionBase(BaseModel):
"""Base WebhookSubscription schema with shared fields."""
url: HttpUrl = Field(
...,
description="Webhook endpoint URL (must be http/https)",
examples=["https://example.com/webhooks/lazy-bird"],
)
secret: str = Field(
...,
min_length=16,
max_length=255,
description="Secret for HMAC signature verification",
)
project_id: Optional[UUID] = Field(
default=None,
description="Project ID (NULL for global subscriptions)",
)
events: list[str] = Field(
...,
min_length=1,
description="Array of event types to subscribe to",
examples=[["task.completed", "task.failed", "pr.created"]],
)
is_active: bool = Field(
default=True,
description="Whether subscription is active",
)
description: Optional[str] = Field(
default=None,
description="Subscription description",
)
class WebhookSubscriptionCreate(WebhookSubscriptionBase):
"""Schema for creating a new webhook subscription."""
pass
class WebhookSubscriptionUpdate(BaseModel):
"""Schema for updating an existing | [
"# pydantic/pydantic:pydantic/main.py\nBaseModel",
"# pydantic/pydantic:pydantic/v1/networks.py\nHttpUrl",
"# pydantic/pydantic:pydantic/v1/fields.py\nField",
"# pydantic/pydantic:pydantic/config.py\nConfigDict"
] | yusufkaraaslan/lazy-bird | lazy_bird/schemas/webhook.py |
"""Pydantic schemas for Auth API endpoints."""
from datetime import datetime
from typing import Optional
from uuid import UUID
from pydantic import BaseModel, ConfigDict, EmailStr, Field
class UserCreate(BaseModel):
    """Schema for user registration."""

    email: EmailStr = Field(
        ...,
        description="User email address",
        examples=["user@example.com"],
    )
    # Plain-text on input; presumably hashed before storage — verify in auth service
    password: str = Field(
        ...,
        min_length=8,
        max_length=128,
        description="User password (8-128 characters)",
    )
    display_name: Optional[str] = Field(
        default=None,
        max_length=255,
        description="Human-readable display name",
        examples=["John Doe"],
    )
class UserLogin(BaseModel):
    """Schema for user login.

    No length constraints on password here so pre-existing credentials
    outside the current registration policy can still authenticate.
    """

    email: EmailStr = Field(
        ...,
        description="User email address",
        examples=["user@example.com"],
    )
    password: str = Field(
        ...,
        description="User password",
    )
class UserResponse(BaseModel):
    """Schema for user responses.

    Never includes the password; from_attributes=True allows construction
    directly from ORM objects.
    """

    id: UUID = Field(..., description="Unique user identifier")
    email: str = Field(..., description="User email address")
    display_name: Optional[str] = Field(default=None, description="Display name")
    role: str = Field(..., description="User role (admin or user)")
    is_active: bool = Field(..., description="Whether account is active")
    created_at: datetime = Field(..., description="Account creation timestamp")
    model_config = ConfigDict(from_attributes=True)
class TokenResponse(BaseModel):
    """Schema for JWT token responses (OAuth2-style bearer pair)."""

    access_token: str = Field(..., description="JWT access token")
    refresh_token: str = Field(..., description="JWT refresh token")
    token_type: str = Field(default="bearer", description="Token type")
class RefreshRequest(BaseModel):
    """Schema for token refresh requests."""

    refresh_token: str = Field(
        ...,
        description="JWT refresh token",
    )
| """Pydantic schemas for Auth API endpoints."""
from datetime import datetime
from typing import Optional
from uuid import UUID
from pydantic import BaseModel, ConfigDict, EmailStr, Field
class UserCreate(BaseModel):
"""Schema for user registration."""
email: EmailStr = Field(
...,
description="User email address",
examples=["user@example.com"],
)
password: str = Field(
...,
min_length=8,
max_length=128,
description="User password (8-128 characters)",
)
display_name: Optional[str] = Field(
default=None,
max_length=255,
description="Human-readable display name",
examples=["John Doe"],
)
class UserLogin(BaseModel):
"""Schema for user login."""
email: EmailStr = Field(
...,
description="User email address",
examples=["user@example.com"],
)
password: str = Field(
...,
description="User password",
)
class UserResponse(BaseModel):
"""Schema for user responses."""
id: UUID = Field(..., description="Unique user identifier")
email: str = Field(..., description="User email address")
display_name: Optional[str] = Field(default=None, description="Display name")
role: str = Field(..., description="User role (admin or user)")
is_active: bool = Field(..., description=" | [
"# pydantic/pydantic:pydantic/main.py\nBaseModel",
"# pydantic/pydantic:pydantic/v1/networks.py\nEmailStr",
"# pydantic/pydantic:pydantic/v1/fields.py\nField",
"# pydantic/pydantic:pydantic/config.py\nConfigDict"
] | yusufkaraaslan/lazy-bird | lazy_bird/schemas/user.py |
"""Pydantic schemas for TaskRun API endpoints."""
from datetime import datetime
from decimal import Decimal
from typing import Any, Dict, Literal, Optional
from uuid import UUID
from pydantic import BaseModel, ConfigDict, Field
class TaskRunQueue(BaseModel):
"""Schema for queuing a new task."""
project_id: UUID = Field(..., description="Project identifier")
claude_account_id: Optional[UUID] = Field(default=None, description="Claude account to use")
work_item_id: str = Field(
...,
min_length=1,
max_length=255,
description="External work item ID",
examples=["issue-42", "JIRA-123"],
)
work_item_url: Optional[str] = Field(default=None, max_length=500)
work_item_title: Optional[str] = Field(default=None, max_length=500)
work_item_description: Optional[str] = Field(default=None)
task_type: str = Field(
default="feature",
max_length=50,
description="Task type: feature, bugfix, refactor, docs, etc.",
)
complexity: Optional[Literal["simple", "medium", "complex"]] = Field(
default=None,
description="Task complexity level",
)
prompt: str = Field(
...,
min_length=1,
description="Prompt sent to Claude for task execution",
)
max_retries: int = Field(
default=3,
ge=0,
le=5,
description="Maximum retry attempts",
)
metadata: Optional[Dict[str, Any]] = Field(
default=None,
description="Additional task metadata",
)
class TaskRunUpdate(BaseModel):
"""Schema for updating task run status/results."""
status: Optional[Literal["queued", "running", "success", "failed", "cancelled", "timeout"]] = (
Field(
default=None,
description="Execution status",
)
)
branch_name: Optional[str] = Field(default=None, max_length=255)
worktree_path: Optional[str] = Field(default=None, max_length=500)
commit_sha: Optional[str] = Field(default=None, max_length=40)
pr_url: Optional[str] = Field(default=None, max_length=500)
pr_number: Optional[int] = Field(default=None)
tests_passed: Optional[bool] = Field(default=None)
test_output: Optional[str] = Field(default=None)
error_message: Optional[str] = Field(default=None)
tokens_used: Optional[int] = Field(default=None, ge=0)
cost_usd: Optional[Decimal] = Field(default=None, ge=Decimal("0"))
task_metadata: Optional[Dict[str, Any]] = Field(default=None)
model_config = ConfigDict(extra="forbid")
class TaskRunResponse(BaseModel):
"""Schema for task run API responses."""
id: UUID = Field(..., description="Unique task run identifier")
project_id: UUID = Field(..., description="Project identifier")
claude_account_id: Optional[UUID] = Field(default=None)
work_item_id: str = Field(..., description="External work item ID")
work_item_url: Optional[str] = Field(default=None)
work_item_title: Optional[str] = Field(default=None)
task_type: str = Field(..., description="Task type")
complexity: Optional[Literal["simple", "medium", "complex"]] = Field(default=None)
status: Literal["queued", "running", "success", "failed", "cancelled", "timeout"] = Field(
..., description="Execution status"
)
started_at: Optional[datetime] = Field(default=None)
completed_at: Optional[datetime] = Field(default=None)
duration_seconds: Optional[int] = Field(default=None)
retry_count: int = Field(..., description="Number of retries attempted")
max_retries: int = Field(..., description="Maximum retries allowed")
branch_name: Optional[str] = Field(default=None)
commit_sha: Optional[str] = Field(default=None)
pr_url: Optional[str] = Field(default=None)
pr_number: Optional[int] = Field(default=None)
tests_passed: Optional[bool] = Field(default=None)
tokens_used: Optional[int] = Field(default=None)
cost_usd: Optional[Decimal] = Field(default=None)
error_message: Optional[str] = Field(default=None)
task_metadata: Optional[Dict[str, Any]] = Field(default=None)
created_at: datetime = Field(..., description="Creation timestamp")
updated_at: datetime = Field(..., description="Last update timestamp")
model_config = ConfigDict(from_attributes=True)
class TaskRunListResponse(BaseModel):
"""Schema for paginated task run list responses."""
items: list[TaskRunResponse] = Field(..., description="List of task runs")
total: int = Field(..., ge=0, description="Total number of task runs")
page: int = Field(..., ge=1, description="Current page number")
page_size: int = Field(..., ge=1, le=100, description="Items per page")
pages: int = Field(..., ge=0, description="Total number of pages")
model_config = ConfigDict(from_attributes=True)
| """Pydantic schemas for TaskRun API endpoints."""
from datetime import datetime
from decimal import Decimal
from typing import Any, Dict, Literal, Optional
from uuid import UUID
from pydantic import BaseModel, ConfigDict, Field
class TaskRunQueue(BaseModel):
"""Schema for queuing a new task."""
project_id: UUID = Field(..., description="Project identifier")
claude_account_id: Optional[UUID] = Field(default=None, description="Claude account to use")
work_item_id: str = Field(
...,
min_length=1,
max_length=255,
description="External work item ID",
examples=["issue-42", "JIRA-123"],
)
work_item_url: Optional[str] = Field(default=None, max_length=500)
work_item_title: Optional[str] = Field(default=None, max_length=500)
work_item_description: Optional[str] = Field(default=None)
task_type: str = Field(
default="feature",
max_length=50,
description="Task type: feature, bugfix, refactor, docs, etc.",
)
complexity: Optional[Literal["simple", "medium", "complex"]] = Field(
default=None,
description="Task complexity level",
)
prompt: str = Field(
...,
min_length=1,
description="Prompt sent to Claude for task execution",
)
max_retries: int = Field(
default= | [
"# pydantic/pydantic:pydantic/main.py\nBaseModel",
"# pydantic/pydantic:pydantic/v1/fields.py\nField",
"# pydantic/pydantic:pydantic/config.py\nConfigDict"
] | yusufkaraaslan/lazy-bird | lazy_bird/schemas/task_run.py |
"""Pydantic schemas for Project API endpoints.
This module defines validation schemas for Project CRUD operations.
Uses Pydantic v2 with comprehensive validation and documentation.
"""
from datetime import datetime
from decimal import Decimal
from typing import Optional
from uuid import UUID
from pydantic import BaseModel, ConfigDict, Field, HttpUrl, field_validator
class ProjectBase(BaseModel):
"""Base Project schema with shared fields.
This schema contains fields common to both create and update operations.
"""
name: str = Field(
...,
min_length=1,
max_length=255,
description="Human-readable project name",
examples=["My Godot Game", "Backend API"],
)
repo_url: str = Field(
...,
min_length=1,
max_length=500,
description="Git repository URL (GitHub, GitLab, etc.)",
examples=["https://github.com/user/my-game"],
)
default_branch: str = Field(
default="main",
min_length=1,
max_length=100,
description="Default git branch for task execution",
examples=["main", "master", "develop"],
)
project_type: str = Field(
...,
min_length=1,
max_length=50,
description="Project type: python, nodejs, rust, godot, etc.",
examples=["godot", "python", "nodejs", "rust"],
)
# Optional: Framework preset
framework_preset_id: Optional[UUID] = Field(
default=None,
description="Reference to framework preset (optional)",
)
# Optional: Custom commands (override preset)
test_command: Optional[str] = Field(
default=None,
max_length=500,
description="Custom test command (overrides preset)",
examples=["pytest tests/"],
)
build_command: Optional[str] = Field(
default=None,
max_length=500,
description="Custom build command (overrides preset)",
examples=["npm run build"],
)
lint_command: Optional[str] = Field(
default=None,
max_length=500,
description="Custom lint command (overrides preset)",
examples=["flake8 ."],
)
format_command: Optional[str] = Field(
default=None,
max_length=500,
description="Custom format command (overrides preset)",
examples=["black ."],
)
# Automation settings
automation_enabled: bool = Field(
default=False,
description="Whether automation is active for this project",
)
ready_state_name: Optional[str] = Field(
default=None,
max_length=100,
description="State name for ready tasks (e.g., 'Ready', 'To Do')",
examples=["Ready", "To Do", "Backlog"],
)
in_progress_state_name: str = Field(
default="In Progress",
max_length=100,
description="State name for running tasks",
)
review_state_name: str = Field(
default="In Review",
max_length=100,
description="State name for tasks in review",
)
done_state_name: str = Field(
default="Done",
max_length=100,
description="State name for completed tasks",
)
# Resource limits
max_concurrent_tasks: int = Field(
default=3,
ge=1,
le=10,
description="Maximum number of parallel task executions",
)
task_timeout_seconds: int = Field(
default=1800,
ge=300,
le=7200,
description="Task execution timeout in seconds",
)
max_cost_per_task_usd: Decimal = Field(
default=Decimal("5.00"),
ge=Decimal("0.01"),
le=Decimal("100.00"),
description="Maximum cost per task in USD",
)
daily_cost_limit_usd: Decimal = Field(
default=Decimal("50.00"),
ge=Decimal("1.00"),
le=Decimal("1000.00"),
description="Daily total cost limit in USD",
)
# Integration settings
github_installation_id: Optional[int] = Field(
default=None,
description="GitHub App installation ID for this project",
)
gitlab_project_id: Optional[int] = Field(
default=None,
description="GitLab project ID for this project",
)
source_platform: Optional[str] = Field(
default=None,
max_length=50,
description="Source platform: github, gitlab, plane, etc.",
examples=["github", "gitlab", "plane"],
)
source_platform_url: Optional[str] = Field(
default=None,
max_length=500,
description="Platform URL for web UI integration",
)
# Claude account
claude_account_id: Optional[UUID] = Field(
default=None,
description="Reference to Claude API account",
)
@field_validator("project_type")
@classmethod
def validate_project_type(cls, v: str) -> str:
"""Validate project_type is reasonable."""
valid_types = [
"python",
"nodejs",
"rust",
"go",
"java",
"csharp",
"godot",
"unity",
"bevy",
"unreal",
"django",
"fastapi",
"flask",
"rails",
"express",
"react",
"vue",
"angular",
"svelte",
"custom",
]
if v.lower() not in valid_types:
# Allow any type, just warn in logs
pass
return v.lower()
@field_validator("source_platform")
@classmethod
def validate_source_platform(cls, v: Optional[str]) -> Optional[str]:
"""Validate source_platform is known platform."""
if v is None:
return v
valid_platforms = ["github", "gitlab", "plane", "linear", "jira", "custom"]
if v.lower() not in valid_platforms:
# Allow any platform
pass
return v.lower()
class ProjectCreate(ProjectBase):
"""Schema for creating a new project.
Requires slug in addition to base fields.
"""
slug: str = Field(
...,
min_length=1,
max_length=100,
pattern=r"^[a-z0-9-]+$",
description="URL-safe unique identifier (lowercase, alphanumeric, hyphens)",
examples=["my-godot-game", "backend-api"],
)
@field_validator("slug")
@classmethod
def validate_slug(cls, v: str) -> str:
"""Validate slug format."""
if not v:
raise ValueError("slug cannot be empty")
if v.startswith("-") or v.endswith("-"):
raise ValueError("slug cannot start or end with hyphen")
if "--" in v:
raise ValueError("slug cannot contain consecutive hyphens")
return v.lower()
class ProjectUpdate(BaseModel):
"""Schema for updating an existing project.
All fields are optional - only provided fields will be updated.
"""
slug: Optional[str] = Field(
default=None,
min_length=1,
max_length=100,
pattern=r"^[a-z0-9-]+$",
description="URL-safe unique identifier (lowercase, alphanumeric, hyphens)",
)
name: Optional[str] = Field(
default=None,
min_length=1,
max_length=255,
description="Human-readable project name",
)
repo_url: Optional[str] = Field(
default=None,
min_length=1,
max_length=500,
description="Git repository URL",
)
default_branch: Optional[str] = Field(
default=None,
min_length=1,
max_length=100,
description="Default git branch",
)
project_type: Optional[str] = Field(
default=None,
min_length=1,
max_length=50,
description="Project type",
)
framework_preset_id: Optional[UUID] = Field(
default=None,
description="Framework preset ID",
)
test_command: Optional[str] = Field(default=None, max_length=500)
build_command: Optional[str] = Field(default=None, max_length=500)
lint_command: Optional[str] = Field(default=None, max_length=500)
format_command: Optional[str] = Field(default=None, max_length=500)
automation_enabled: Optional[bool] = Field(default=None)
ready_state_name: Optional[str] = Field(default=None, max_length=100)
in_progress_state_name: Optional[str] = Field(default=None, max_length=100)
review_state_name: Optional[str] = Field(default=None, max_length=100)
done_state_name: Optional[str] = Field(default=None, max_length=100)
max_concurrent_tasks: Optional[int] = Field(default=None, ge=1, le=10)
task_timeout_seconds: Optional[int] = Field(default=None, ge=300, le=7200)
max_cost_per_task_usd: Optional[Decimal] = Field(
default=None, ge=Decimal("0.01"), le=Decimal("100.00")
)
daily_cost_limit_usd: Optional[Decimal] = Field(
default=None, ge=Decimal("1.00"), le=Decimal("1000.00")
)
github_installation_id: Optional[int] = Field(default=None)
gitlab_project_id: Optional[int] = Field(default=None)
source_platform: Optional[str] = Field(default=None, max_length=50)
source_platform_url: Optional[str] = Field(default=None, max_length=500)
claude_account_id: Optional[UUID] = Field(default=None)
model_config = ConfigDict(extra="forbid") # Reject unknown fields
class ProjectResponse(ProjectBase):
"""Schema for project API responses.
Includes all fields plus id, slug, timestamps, and relationships.
"""
id: UUID = Field(..., description="Unique project identifier")
slug: str = Field(..., description="URL-safe unique identifier")
created_at: datetime = Field(..., description="Creation timestamp")
updated_at: datetime = Field(..., description="Last update timestamp")
deleted_at: Optional[datetime] = Field(
default=None,
description="Soft delete timestamp (NULL if active)",
)
# Computed field
is_active: bool = Field(
default=True,
description="Whether project is active (not deleted)",
)
# Optional statistics (only included in list view)
total_tasks: Optional[int] = Field(
default=None,
description="Total number of tasks for this project",
)
tasks_queued: Optional[int] = Field(
default=None,
description="Number of queued tasks",
)
tasks_running: Optional[int] = Field(
default=None,
description="Number of running tasks",
)
tasks_success: Optional[int] = Field(
default=None,
description="Number of successful tasks",
)
tasks_failed: Optional[int] = Field(
default=None,
description="Number of failed tasks",
)
last_task_at: Optional[datetime] = Field(
default=None,
description="Timestamp of last task execution",
)
model_config = ConfigDict(from_attributes=True) # Enable ORM mode
class ProjectListResponse(BaseModel):
"""Schema for paginated project list responses."""
items: list[ProjectResponse] = Field(..., description="List of projects")
total: int = Field(..., ge=0, description="Total number of projects")
page: int = Field(..., ge=1, description="Current page number")
page_size: int = Field(..., ge=1, le=100, description="Items per page")
pages: int = Field(..., ge=0, description="Total number of pages")
model_config = ConfigDict(from_attributes=True)
| """Pydantic schemas for Project API endpoints.
This module defines validation schemas for Project CRUD operations.
Uses Pydantic v2 with comprehensive validation and documentation.
"""
from datetime import datetime
from decimal import Decimal
from typing import Optional
from uuid import UUID
from pydantic import BaseModel, ConfigDict, Field, HttpUrl, field_validator
class ProjectBase(BaseModel):
"""Base Project schema with shared fields.
This schema contains fields common to both create and update operations.
"""
name: str = Field(
...,
min_length=1,
max_length=255,
description="Human-readable project name",
examples=["My Godot Game", "Backend API"],
)
repo_url: str = Field(
...,
min_length=1,
max_length=500,
description="Git repository URL (GitHub, GitLab, etc.)",
examples=["https://github.com/user/my-game"],
)
default_branch: str = Field(
default="main",
min_length=1,
max_length=100,
description="Default git branch for task execution",
examples=["main", "master", "develop"],
)
project_type: str = Field(
...,
min_length=1,
max_length=50,
description="Project type: python, nodejs, rust, godot, etc.",
examples=["godot", "python | [
"# pydantic/pydantic:pydantic/main.py\nBaseModel",
"# pydantic/pydantic:pydantic/v1/fields.py\nField",
"# pydantic/pydantic:pydantic/functional_validators.py\nfield_validator",
"# pydantic/pydantic:pydantic/config.py\nConfigDict"
] | yusufkaraaslan/lazy-bird | lazy_bird/schemas/project.py |
"""Pydantic schemas for FrameworkPreset API endpoints."""
from datetime import datetime
from typing import Any, Dict, Optional
from uuid import UUID
from pydantic import BaseModel, ConfigDict, Field
class FrameworkPresetBase(BaseModel):
"""Base FrameworkPreset schema with shared fields."""
name: str = Field(
...,
min_length=1,
max_length=100,
pattern=r"^[a-z0-9-]+$",
description="Internal preset name (lowercase, unique)",
examples=["godot", "django", "react"],
)
display_name: str = Field(
...,
min_length=1,
max_length=255,
description="Human-readable display name",
examples=["Godot Engine 4.x", "Django", "React + Vite"],
)
description: Optional[str] = Field(
default=None,
description="Preset description",
)
framework_type: str = Field(
...,
min_length=1,
max_length=50,
description="Framework category: game_engine, backend, frontend, language",
examples=["game_engine", "backend", "frontend", "language"],
)
language: Optional[str] = Field(
default=None,
max_length=50,
description="Programming language",
examples=["gdscript", "python", "javascript", "rust"],
)
test_command: str = Field(
...,
min_length=1,
max_length=500,
description="Command to run tests (required)",
examples=["pytest", "npm test", "cargo test"],
)
build_command: Optional[str] = Field(
default=None,
max_length=500,
description="Command to build project",
)
lint_command: Optional[str] = Field(
default=None,
max_length=500,
description="Command to lint code",
)
format_command: Optional[str] = Field(
default=None,
max_length=500,
description="Command to format code",
)
config_files: Optional[Dict[str, Any]] = Field(
default=None,
description="Framework-specific config file paths (JSON)",
examples=[{"godot": "project.godot", "python": "pyproject.toml"}],
)
class FrameworkPresetCreate(FrameworkPresetBase):
"""Schema for creating a new framework preset."""
pass
class FrameworkPresetUpdate(BaseModel):
"""Schema for updating an existing framework preset."""
display_name: Optional[str] = Field(default=None, min_length=1, max_length=255)
description: Optional[str] = Field(default=None)
framework_type: Optional[str] = Field(default=None, max_length=50)
language: Optional[str] = Field(default=None, max_length=50)
test_command: Optional[str] = Field(default=None, max_length=500)
build_command: Optional[str] = Field(default=None, max_length=500)
lint_command: Optional[str] = Field(default=None, max_length=500)
format_command: Optional[str] = Field(default=None, max_length=500)
config_files: Optional[Dict[str, Any]] = Field(default=None)
model_config = ConfigDict(extra="forbid")
class FrameworkPresetResponse(FrameworkPresetBase):
"""Schema for framework preset API responses."""
id: UUID = Field(..., description="Unique preset identifier")
is_builtin: bool = Field(..., description="Whether this is a built-in preset")
created_at: datetime = Field(..., description="Creation timestamp")
updated_at: datetime = Field(..., description="Last update timestamp")
model_config = ConfigDict(from_attributes=True)
class FrameworkPresetListResponse(BaseModel):
"""Schema for paginated framework preset list responses."""
items: list[FrameworkPresetResponse] = Field(..., description="List of presets")
total: int = Field(..., ge=0, description="Total number of presets")
page: int = Field(..., ge=1, description="Current page number")
page_size: int = Field(..., ge=1, le=100, description="Items per page")
pages: int = Field(..., ge=0, description="Total number of pages")
model_config = ConfigDict(from_attributes=True)
| """Pydantic schemas for FrameworkPreset API endpoints."""
from datetime import datetime
from typing import Any, Dict, Optional
from uuid import UUID
from pydantic import BaseModel, ConfigDict, Field
class FrameworkPresetBase(BaseModel):
"""Base FrameworkPreset schema with shared fields."""
name: str = Field(
...,
min_length=1,
max_length=100,
pattern=r"^[a-z0-9-]+$",
description="Internal preset name (lowercase, unique)",
examples=["godot", "django", "react"],
)
display_name: str = Field(
...,
min_length=1,
max_length=255,
description="Human-readable display name",
examples=["Godot Engine 4.x", "Django", "React + Vite"],
)
description: Optional[str] = Field(
default=None,
description="Preset description",
)
framework_type: str = Field(
...,
min_length=1,
max_length=50,
description="Framework category: game_engine, backend, frontend, language",
examples=["game_engine", "backend", "frontend", "language"],
)
language: Optional[str] = Field(
default=None,
max_length=50,
description="Programming language",
examples= | [
"# pydantic/pydantic:pydantic/main.py\nBaseModel",
"# pydantic/pydantic:pydantic/v1/fields.py\nField",
"# pydantic/pydantic:pydantic/config.py\nConfigDict"
] | yusufkaraaslan/lazy-bird | lazy_bird/schemas/framework_preset.py |
"""Pydantic schemas for ClaudeAccount API endpoints.
This module defines validation schemas for ClaudeAccount CRUD operations.
Handles dual mode (API vs Subscription) and encrypted credential storage.
"""
from datetime import datetime
from decimal import Decimal
from typing import Literal, Optional
from uuid import UUID
from pydantic import BaseModel, ConfigDict, Field, field_validator, model_validator
class ClaudeAccountBase(BaseModel):
"""Base ClaudeAccount schema with shared fields."""
name: str = Field(
...,
min_length=1,
max_length=255,
description="Human-readable account name",
examples=["Production API", "Personal Subscription"],
)
account_type: Literal["api", "subscription"] = Field(
...,
description="Account type: api or subscription",
)
# Claude settings
model: str = Field(
default="claude-sonnet-4-5",
min_length=1,
max_length=100,
description="Claude model identifier",
examples=["claude-sonnet-4-5", "claude-opus-4"],
)
max_tokens: int = Field(
default=8000,
ge=1,
le=200000,
description="Maximum tokens per request",
)
temperature: Decimal = Field(
default=Decimal("0.7"),
ge=Decimal("0.0"),
le=Decimal("1.0"),
description="Model temperature (0.00-1.00)",
)
# Usage limits
monthly_budget_usd: Optional[Decimal] = Field(
default=None,
ge=Decimal("1.00"),
description="Monthly spending limit in USD",
)
is_active: bool = Field(
default=True,
description="Whether account is active and can be used",
)
class ClaudeAccountCreate(ClaudeAccountBase):
"""Schema for creating a new Claude account.
Requires either api_key (for API mode) or config_directory (for subscription mode).
"""
# API mode credentials
api_key: Optional[str] = Field(
default=None,
min_length=1,
max_length=500,
description="Anthropic API key (required for api mode, will be encrypted)",
)
# Subscription mode credentials
config_directory: Optional[str] = Field(
default=None,
min_length=1,
max_length=500,
description="Config directory path (required for subscription mode)",
)
session_token: Optional[str] = Field(
default=None,
min_length=1,
max_length=500,
description="Session token (for subscription mode, will be encrypted)",
)
@model_validator(mode="after")
def validate_credentials(self):
"""Validate that required credentials are provided based on account_type."""
if self.account_type == "api":
if not self.api_key:
raise ValueError("api_key is required for API mode accounts")
elif self.account_type == "subscription":
if not self.config_directory:
raise ValueError("config_directory is required for subscription mode accounts")
return self
class ClaudeAccountUpdate(BaseModel):
"""Schema for updating an existing Claude account.
All fields are optional - only provided fields will be updated.
"""
name: Optional[str] = Field(
default=None,
min_length=1,
max_length=255,
description="Human-readable account name",
)
# Note: account_type cannot be changed after creation
# API mode credentials (encrypted before storage)
api_key: Optional[str] = Field(
default=None,
min_length=1,
max_length=500,
description="New API key (will be encrypted)",
)
# Subscription mode credentials
config_directory: Optional[str] = Field(
default=None,
min_length=1,
max_length=500,
description="Config directory path",
)
session_token: Optional[str] = Field(
default=None,
min_length=1,
max_length=500,
description="New session token (will be encrypted)",
)
# Claude settings
model: Optional[str] = Field(
default=None,
min_length=1,
max_length=100,
description="Claude model identifier",
)
max_tokens: Optional[int] = Field(
default=None,
ge=1,
le=200000,
description="Maximum tokens per request",
)
temperature: Optional[Decimal] = Field(
default=None,
ge=Decimal("0.0"),
le=Decimal("1.0"),
description="Model temperature",
)
monthly_budget_usd: Optional[Decimal] = Field(
default=None,
ge=Decimal("1.00"),
description="Monthly spending limit",
)
is_active: Optional[bool] = Field(
default=None,
description="Whether account is active",
)
model_config = ConfigDict(extra="forbid")
class ClaudeAccountResponse(BaseModel):
"""Schema for Claude account API responses.
Includes all fields except encrypted credentials (uses preview instead).
"""
id: UUID = Field(..., description="Unique account identifier")
name: str = Field(..., description="Human-readable account name")
account_type: Literal["api", "subscription"] = Field(..., description="Account type")
# API key preview (not full key)
api_key_preview: Optional[str] = Field(
default=None,
description="Safe preview of API key (e.g., 'sk-ant-1***ghij')",
)
# Subscription mode (non-sensitive)
config_directory: Optional[str] = Field(
default=None,
description="Config directory path",
)
# Claude settings
model: str = Field(..., description="Claude model identifier")
max_tokens: int = Field(..., description="Maximum tokens per request")
temperature: Decimal = Field(..., description="Model temperature")
# Usage limits
monthly_budget_usd: Optional[Decimal] = Field(
default=None,
description="Monthly spending limit in USD",
)
is_active: bool = Field(..., description="Whether account is active")
# Metadata
created_at: datetime = Field(..., description="Creation timestamp")
updated_at: datetime = Field(..., description="Last update timestamp")
last_used_at: Optional[datetime] = Field(
default=None,
description="Last time this account was used",
)
model_config = ConfigDict(from_attributes=True)
class ClaudeAccountListResponse(BaseModel):
"""Schema for paginated Claude account list responses."""
items: list[ClaudeAccountResponse] = Field(..., description="List of accounts")
total: int = Field(..., ge=0, description="Total number of accounts")
page: int = Field(..., ge=1, description="Current page number")
page_size: int = Field(..., ge=1, le=100, description="Items per page")
pages: int = Field(..., ge=0, description="Total number of pages")
model_config = ConfigDict(from_attributes=True)
| """Pydantic schemas for ClaudeAccount API endpoints.
This module defines validation schemas for ClaudeAccount CRUD operations.
Handles dual mode (API vs Subscription) and encrypted credential storage.
"""
from datetime import datetime
from decimal import Decimal
from typing import Literal, Optional
from uuid import UUID
from pydantic import BaseModel, ConfigDict, Field, field_validator, model_validator
class ClaudeAccountBase(BaseModel):
"""Base ClaudeAccount schema with shared fields."""
name: str = Field(
...,
min_length=1,
max_length=255,
description="Human-readable account name",
examples=["Production API", "Personal Subscription"],
)
account_type: Literal["api", "subscription"] = Field(
...,
description="Account type: api or subscription",
)
# Claude settings
model: str = Field(
default="claude-sonnet-4-5",
min_length=1,
max_length=100,
description="Claude model identifier",
examples=["claude-sonnet-4-5", "claude-opus-4"],
)
max_tokens: int = Field(
default=8000,
ge=1,
le=200000,
description="Maximum tokens per request",
)
temperature: Decimal = Field(
default=Decimal("0.7"),
ge=Decimal("0.0"),
le=Decimal("1.0 | [
"# pydantic/pydantic:pydantic/main.py\nBaseModel",
"# pydantic/pydantic:pydantic/v1/fields.py\nField",
"# pydantic/pydantic:pydantic/functional_validators.py\nmodel_validator",
"# pydantic/pydantic:pydantic/config.py\nConfigDict"
] | yusufkaraaslan/lazy-bird | lazy_bird/schemas/claude_account.py |
"""Pydantic schemas for ApiKey API endpoints."""
from datetime import datetime
from typing import Literal, Optional
from uuid import UUID
from pydantic import BaseModel, ConfigDict, Field
class ApiKeyBase(BaseModel):
"""Base ApiKey schema with shared fields."""
name: str = Field(
...,
min_length=1,
max_length=255,
description="Human-readable key name",
examples=["Production API Key", "Development Key"],
)
project_id: Optional[UUID] = Field(
default=None,
description="Project ID (NULL for organization-level keys)",
)
scopes: list[Literal["read", "write", "admin"]] = Field(
default=["read"],
min_length=1,
description="Array of permission scopes",
examples=[["read", "write"]],
)
expires_at: Optional[datetime] = Field(
default=None,
description="Expiration timestamp (NULL for no expiration)",
)
class ApiKeyCreate(ApiKeyBase):
"""Schema for creating a new API key."""
pass
class ApiKeyUpdate(BaseModel):
"""Schema for updating an existing API key."""
name: Optional[str] = Field(default=None, min_length=1, max_length=255)
scopes: Optional[list[Literal["read", "write", "admin"]]] = Field(default=None, min_length=1)
expires_at: Optional[datetime] = Field(default=None)
is_active: Optional[bool] = Field(default=None)
model_config = ConfigDict(extra="forbid")
class ApiKeyResponse(BaseModel):
"""Schema for API key responses.
Note: Only returns key_prefix for security. Full key is only shown once during creation.
"""
id: UUID = Field(..., description="Unique key identifier")
key_prefix: str = Field(..., description="First 8 chars of key for identification")
name: str = Field(..., description="Human-readable key name")
project_id: Optional[UUID] = Field(default=None, description="Project ID")
scopes: list[str] = Field(..., description="Permission scopes")
is_active: bool = Field(..., description="Whether key is active")
expires_at: Optional[datetime] = Field(default=None, description="Expiration timestamp")
last_used_at: Optional[datetime] = Field(default=None, description="Last usage timestamp")
created_by: Optional[str] = Field(default=None, description="Creator identifier")
created_at: datetime = Field(..., description="Creation timestamp")
revoked_at: Optional[datetime] = Field(default=None, description="Revocation timestamp")
model_config = ConfigDict(from_attributes=True)
class ApiKeyCreateResponse(ApiKeyResponse):
"""Schema for API key creation response.
Includes the full key - THIS IS THE ONLY TIME THE FULL KEY IS RETURNED.
"""
key: str = Field(..., description="Full API key (save this - won't be shown again!)")
class ApiKeyListResponse(BaseModel):
"""Schema for paginated API key list responses."""
items: list[ApiKeyResponse] = Field(..., description="List of API keys")
total: int = Field(..., ge=0, description="Total number of keys")
page: int = Field(..., ge=1, description="Current page number")
page_size: int = Field(..., ge=1, le=100, description="Items per page")
pages: int = Field(..., ge=0, description="Total number of pages")
model_config = ConfigDict(from_attributes=True)
| """Pydantic schemas for ApiKey API endpoints."""
from datetime import datetime
from typing import Literal, Optional
from uuid import UUID
from pydantic import BaseModel, ConfigDict, Field
class ApiKeyBase(BaseModel):
"""Base ApiKey schema with shared fields."""
name: str = Field(
...,
min_length=1,
max_length=255,
description="Human-readable key name",
examples=["Production API Key", "Development Key"],
)
project_id: Optional[UUID] = Field(
default=None,
description="Project ID (NULL for organization-level keys)",
)
scopes: list[Literal["read", "write", "admin"]] = Field(
default=["read"],
min_length=1,
description="Array of permission scopes",
examples=[["read", "write"]],
)
expires_at: Optional[datetime] = Field(
default=None,
description="Expiration timestamp (NULL for no expiration)",
)
class ApiKeyCreate(ApiKeyBase):
    """Schema for creating a new API key.

    Adds nothing beyond :class:`ApiKeyBase`; it exists so the create
    endpoint validates against a distinct, explicitly named request type.
    """
class ApiKeyUpdate(BaseModel):
"""Schema for updating an existing API key."""
name: Optional[str] = Field(default=None, min_length=1, max_length=255)
scopes: Optional[list[Literal["read", "write", "admin"]]] = Field(default=None, min_length=1)
expires_at: Optional[datetime] = Field(default=None)
is | [
"# pydantic/pydantic:pydantic/main.py\nBaseModel",
"# pydantic/pydantic:pydantic/v1/fields.py\nField",
"# pydantic/pydantic:pydantic/config.py\nConfigDict"
] | yusufkaraaslan/lazy-bird | lazy_bird/schemas/api_key.py |
"""Pydantic validation schemas for API endpoints.
This package contains Pydantic v2 schemas for request/response validation.
All schemas use modern Pydantic features including field validators and ConfigDict.
"""
from lazy_bird.schemas.api_key import (
ApiKeyBase,
ApiKeyCreate,
ApiKeyCreateResponse,
ApiKeyListResponse,
ApiKeyResponse,
ApiKeyUpdate,
)
from lazy_bird.schemas.claude_account import (
ClaudeAccountBase,
ClaudeAccountCreate,
ClaudeAccountListResponse,
ClaudeAccountResponse,
ClaudeAccountUpdate,
)
from lazy_bird.schemas.framework_preset import (
FrameworkPresetBase,
FrameworkPresetCreate,
FrameworkPresetListResponse,
FrameworkPresetResponse,
FrameworkPresetUpdate,
)
from lazy_bird.schemas.project import (
ProjectBase,
ProjectCreate,
ProjectListResponse,
ProjectResponse,
ProjectUpdate,
)
from lazy_bird.schemas.task_run import (
TaskRunListResponse,
TaskRunQueue,
TaskRunResponse,
TaskRunUpdate,
)
from lazy_bird.schemas.webhook import (
WebhookSubscriptionBase,
WebhookSubscriptionCreate,
WebhookSubscriptionListResponse,
WebhookSubscriptionResponse,
WebhookSubscriptionUpdate,
)
__all__ = [
# Project schemas
"ProjectBase",
"ProjectCreate",
"ProjectUpdate",
"ProjectResponse",
"ProjectListResponse",
# ClaudeAccount schemas
"ClaudeAccountBase",
"ClaudeAccountCreate",
"ClaudeAccountUpdate",
"ClaudeAccountResponse",
"ClaudeAccountListResponse",
# FrameworkPreset schemas
"FrameworkPresetBase",
"FrameworkPresetCreate",
"FrameworkPresetUpdate",
"FrameworkPresetResponse",
"FrameworkPresetListResponse",
# TaskRun schemas
"TaskRunQueue",
"TaskRunUpdate",
"TaskRunResponse",
"TaskRunListResponse",
# WebhookSubscription schemas
"WebhookSubscriptionBase",
"WebhookSubscriptionCreate",
"WebhookSubscriptionUpdate",
"WebhookSubscriptionResponse",
"WebhookSubscriptionListResponse",
# ApiKey schemas
"ApiKeyBase",
"ApiKeyCreate",
"ApiKeyUpdate",
"ApiKeyResponse",
"ApiKeyCreateResponse",
"ApiKeyListResponse",
]
| """Pydantic validation schemas for API endpoints.
This package contains Pydantic v2 schemas for request/response validation.
All schemas use modern Pydantic features including field validators and ConfigDict.
"""
from lazy_bird.schemas.api_key import (
ApiKeyBase,
ApiKeyCreate,
ApiKeyCreateResponse,
ApiKeyListResponse,
ApiKeyResponse,
ApiKeyUpdate,
)
from lazy_bird.schemas.claude_account import (
ClaudeAccountBase,
ClaudeAccountCreate,
ClaudeAccountListResponse,
ClaudeAccountResponse,
ClaudeAccountUpdate,
)
from lazy_bird.schemas.framework_preset import (
FrameworkPresetBase,
FrameworkPresetCreate,
FrameworkPresetListResponse,
FrameworkPresetResponse,
FrameworkPresetUpdate,
)
from lazy_bird.schemas.project import (
ProjectBase,
ProjectCreate,
ProjectListResponse,
ProjectResponse,
ProjectUpdate,
)
from lazy_bird.schemas.task_run import (
TaskRunListResponse,
TaskRunQueue,
TaskRunResponse,
TaskRunUpdate,
)
from lazy_bird.schemas.webhook import (
WebhookSubscriptionBase,
WebhookSubscriptionCreate,
WebhookSubscriptionListResponse,
WebhookSubscriptionResponse,
WebhookSubscriptionUpdate,
)
__all__ = [
# Project schemas
"ProjectBase",
"ProjectCreate",
"ProjectUpdate",
"ProjectResponse",
"ProjectListResponse",
# ClaudeAccount schemas
"ClaudeAccountBase",
"ClaudeAccountCreate",
"ClaudeAccountUpdate", | [
"# yusufkaraaslan/lazy-bird:lazy_bird/schemas/api_key.py\nApiKeyBase",
"# yusufkaraaslan/lazy-bird:lazy_bird/schemas/claude_account.py\nClaudeAccountBase",
"# yusufkaraaslan/lazy-bird:lazy_bird/schemas/framework_preset.py\nFrameworkPresetBase",
"# yusufkaraaslan/lazy-bird:lazy_bird/schemas/project.py\nProject... | yusufkaraaslan/lazy-bird | lazy_bird/schemas/__init__.py |
"""WebhookSubscription model - Client webhook endpoint registrations.
This module defines the WebhookSubscription model for registering webhook
endpoints that receive task execution events.
"""
import uuid
from datetime import datetime
from typing import List, Optional
from sqlalchemy import (
Boolean,
CheckConstraint,
Column,
DateTime,
ForeignKey,
Index,
Integer,
String,
Text,
func,
)
from sqlalchemy.dialects.postgresql import ARRAY, UUID
from sqlalchemy.orm import Mapped, mapped_column, relationship
from lazy_bird.core.database import Base
class WebhookSubscription(Base):
    """WebhookSubscription model for webhook endpoint registrations.

    Attributes:
        id: Unique subscription identifier
        url: Webhook endpoint URL
        secret: Secret for HMAC signature verification
        project_id: Project ID (NULL for global subscriptions)
        events: Array of event types to subscribe to
        is_active: Whether subscription is active
        last_triggered_at: Last webhook delivery time
        failure_count: Number of consecutive failures
        last_failure_at: Last failure timestamp
        description: Subscription description
        created_at: Creation timestamp
        updated_at: Last update timestamp
    """

    __tablename__ = "webhook_subscriptions"

    # UUID primary key: generated client-side (uuid4) by the ORM, or by
    # PostgreSQL gen_random_uuid() for rows inserted outside the ORM.
    id: Mapped[uuid.UUID] = mapped_column(
        UUID(as_uuid=True),
        primary_key=True,
        default=uuid.uuid4,
        server_default=func.gen_random_uuid(),
        nullable=False,
    )
    # URL format is enforced by check_url_format in __table_args__.
    url: Mapped[str] = mapped_column(String(500), nullable=False, comment="Webhook endpoint URL")
    secret: Mapped[str] = mapped_column(
        String(255), nullable=False, comment="Secret for HMAC signature verification"
    )
    # NULL project_id means the subscription is global (receives events for
    # every project); deleting the project cascades to its subscriptions.
    project_id: Mapped[Optional[uuid.UUID]] = mapped_column(
        UUID(as_uuid=True),
        ForeignKey("projects.id", ondelete="CASCADE"),
        nullable=True,
        index=True,
        comment="Project ID (NULL for global subscriptions)",
    )
    # PostgreSQL text[] of subscribed event names; GIN-indexed via
    # idx_webhook_subscriptions_events in __table_args__.
    events: Mapped[List[str]] = mapped_column(
        ARRAY(Text), nullable=False, comment="Array of event types"
    )
    is_active: Mapped[bool] = mapped_column(
        Boolean,
        nullable=False,
        server_default="true",
        default=True,
        index=True,
        comment="Whether subscription is active",
    )
    last_triggered_at: Mapped[Optional[datetime]] = mapped_column(
        DateTime(timezone=True), nullable=True, comment="Last webhook delivery time"
    )
    # Consecutive delivery failures; presumably reset on success and used to
    # auto-disable flapping endpoints — confirm against the delivery service.
    failure_count: Mapped[int] = mapped_column(
        Integer,
        nullable=False,
        server_default="0",
        default=0,
        comment="Number of consecutive failures",
    )
    last_failure_at: Mapped[Optional[datetime]] = mapped_column(
        DateTime(timezone=True), nullable=True, comment="Last failure timestamp"
    )
    description: Mapped[Optional[str]] = mapped_column(
        Text, nullable=True, comment="Subscription description"
    )
    created_at: Mapped[datetime] = mapped_column(
        DateTime(timezone=True),
        nullable=False,
        server_default=func.current_timestamp(),
        default=func.current_timestamp(),
    )
    # updated_at refreshes on every ORM update via onupdate.
    updated_at: Mapped[datetime] = mapped_column(
        DateTime(timezone=True),
        nullable=False,
        server_default=func.current_timestamp(),
        default=func.current_timestamp(),
        onupdate=func.current_timestamp(),
    )

    # Owning project (many-to-one); None for global subscriptions.
    project: Mapped[Optional["Project"]] = relationship(
        "Project",
        back_populates="webhook_subscriptions",
        foreign_keys=[project_id],
    )

    __table_args__ = (
        # Require an http:// or https:// URL at the database level.
        CheckConstraint("url ~ '^https?://'", name="check_url_format"),
        # GIN index so "does this subscription include event X" is fast.
        Index("idx_webhook_subscriptions_events", events, postgresql_using="gin"),
        {"comment": "Client webhook endpoint registrations"},
    )

    def __repr__(self) -> str:
        """Concise debug representation (id, url, active flag)."""
        return f"<WebhookSubscription(id={self.id}, url='{self.url}', active={self.is_active})>"
| """WebhookSubscription model - Client webhook endpoint registrations.
This module defines the WebhookSubscription model for registering webhook
endpoints that receive task execution events.
"""
import uuid
from datetime import datetime
from typing import List, Optional
from sqlalchemy import (
Boolean,
CheckConstraint,
Column,
DateTime,
ForeignKey,
Index,
Integer,
String,
Text,
func,
)
from sqlalchemy.dialects.postgresql import ARRAY, UUID
from sqlalchemy.orm import Mapped, mapped_column, relationship
from lazy_bird.core.database import Base
class WebhookSubscription(Base):
"""WebhookSubscription model for webhook endpoint registrations.
Attributes:
id: Unique subscription identifier
url: Webhook endpoint URL
secret: Secret for HMAC signature verification
project_id: Project ID (NULL for global subscriptions)
events: Array of event types to subscribe to
is_active: Whether subscription is active
last_triggered_at: Last webhook delivery time
failure_count: Number of consecutive failures
last_failure_at: Last failure timestamp
description: Subscription description
created_at: Creation timestamp
updated_at: Last update timestamp
"""
__tablename__ = "webhook_subscriptions"
id: Mapped[uuid.UUID] = mapped_column(
UUID(as_uuid=True),
primary_key=True,
default=uuid.uuid4,
server_default=func.gen | [
"# sqlalchemy/sqlalchemy:lib/sqlalchemy/sql/sqltypes.py\nBoolean",
"# sqlalchemy/sqlalchemy:lib/sqlalchemy/orm/base.py\nMapped",
"# sqlalchemy/sqlalchemy:lib/sqlalchemy/orm/_orm_constructors.py\nmapped_column",
"# sqlalchemy/sqlalchemy:lib/sqlalchemy/sql/schema.py\nForeignKey"
] | yusufkaraaslan/lazy-bird | lazy_bird/models/webhook_subscription.py |
"""User model - JWT authentication users.
This module defines the User model for JWT-based authentication and authorization.
"""
import uuid
from datetime import datetime
from typing import Optional
from sqlalchemy import (
Boolean,
CheckConstraint,
DateTime,
String,
func,
)
from sqlalchemy.dialects.postgresql import UUID
from sqlalchemy.orm import Mapped, mapped_column
from lazy_bird.core.database import Base
class User(Base):
    """User model for JWT authentication.

    Attributes:
        id: Unique user identifier
        email: User email address (unique)
        password_hash: Bcrypt hash of password
        display_name: Human-readable display name
        role: User role (admin or user)
        is_active: Whether user account is active
        created_at: Account creation timestamp
        updated_at: Last update timestamp
    """

    __tablename__ = "users"

    # UUID primary key: uuid4 from the ORM, gen_random_uuid() for direct SQL.
    id: Mapped[uuid.UUID] = mapped_column(
        UUID(as_uuid=True),
        primary_key=True,
        default=uuid.uuid4,
        server_default=func.gen_random_uuid(),
        nullable=False,
    )
    # Unique + indexed: the login identifier.
    email: Mapped[str] = mapped_column(
        String(255),
        unique=True,
        nullable=False,
        index=True,
        comment="User email address",
    )
    # Only the bcrypt hash is stored, never the plaintext password.
    password_hash: Mapped[str] = mapped_column(
        String(255),
        nullable=False,
        comment="Bcrypt hash of password",
    )
    display_name: Mapped[Optional[str]] = mapped_column(
        String(255),
        nullable=True,
        comment="Human-readable display name",
    )
    # Constrained to 'admin' or 'user' by check_user_role below.
    role: Mapped[str] = mapped_column(
        String(50),
        nullable=False,
        server_default="user",
        default="user",
        comment="User role (admin or user)",
    )
    # Indexed so "active users" queries avoid a full scan.
    is_active: Mapped[bool] = mapped_column(
        Boolean,
        nullable=False,
        server_default="true",
        default=True,
        index=True,
    )
    created_at: Mapped[datetime] = mapped_column(
        DateTime(timezone=True),
        nullable=False,
        server_default=func.current_timestamp(),
        default=func.current_timestamp(),
    )
    # NOTE(review): unlike other models here, updated_at has no onupdate
    # hook, so it stays NULL unless set explicitly — confirm this is intended.
    updated_at: Mapped[Optional[datetime]] = mapped_column(
        DateTime(timezone=True),
        nullable=True,
    )

    __table_args__ = (
        CheckConstraint(
            "role IN ('admin', 'user')",
            name="check_user_role",
        ),
        {"comment": "JWT authentication users"},
    )

    def __repr__(self) -> str:
        """Concise debug representation (id, email, role, active flag)."""
        return f"<User(id={self.id}, email='{self.email}', role='{self.role}', active={self.is_active})>"
| """User model - JWT authentication users.
This module defines the User model for JWT-based authentication and authorization.
"""
import uuid
from datetime import datetime
from typing import Optional
from sqlalchemy import (
Boolean,
CheckConstraint,
DateTime,
String,
func,
)
from sqlalchemy.dialects.postgresql import UUID
from sqlalchemy.orm import Mapped, mapped_column
from lazy_bird.core.database import Base
class User(Base):
"""User model for JWT authentication.
Attributes:
id: Unique user identifier
email: User email address (unique)
password_hash: Bcrypt hash of password
display_name: Human-readable display name
role: User role (admin or user)
is_active: Whether user account is active
created_at: Account creation timestamp
updated_at: Last update timestamp
"""
__tablename__ = "users"
id: Mapped[uuid.UUID] = mapped_column(
UUID(as_uuid=True),
primary_key=True,
default=uuid.uuid4,
server_default=func.gen_random_uuid(),
nullable=False,
)
email: Mapped[str] = mapped_column(
String(255),
unique=True,
nullable=False,
index=True,
comment="User email address",
)
password_hash: Mapped[str] = mapped_column(
| [
"# sqlalchemy/sqlalchemy:lib/sqlalchemy/sql/sqltypes.py\nBoolean",
"# sqlalchemy/sqlalchemy:lib/sqlalchemy/orm/base.py\nMapped",
"# sqlalchemy/sqlalchemy:lib/sqlalchemy/orm/_orm_constructors.py\nmapped_column",
"# sqlalchemy/sqlalchemy:lib/sqlalchemy/sql/schema.py\nCheckConstraint"
] | yusufkaraaslan/lazy-bird | lazy_bird/models/user.py |
"""TaskRunLog model - Detailed task execution logs.
This module defines the TaskRunLog model which stores detailed logs for
task execution, including Claude interactions, tool usage, and progress tracking.
"""
import uuid
from datetime import datetime
from enum import Enum
from typing import Any, Dict, Optional
from sqlalchemy import CheckConstraint, Column, DateTime, ForeignKey, Index, String, Text, func
from sqlalchemy.dialects.postgresql import JSONB, UUID
from sqlalchemy.orm import Mapped, mapped_column, relationship, validates
from lazy_bird.core.database import Base
class LogLevel(str, Enum):
    """Severity levels for task execution log entries.

    Members also subclass ``str``, so they compare and serialize as their
    plain string values (e.g. ``LogLevel.INFO == "info"``).
    """

    DEBUG = "debug"        # fine-grained diagnostic detail
    INFO = "info"          # informational messages
    WARNING = "warning"    # warning messages
    ERROR = "error"        # error messages
    CRITICAL = "critical"  # critical error messages
class TaskRunLog(Base):
    """TaskRunLog model for detailed task execution logs.

    A TaskRunLog represents a single log entry during task execution.
    Logs capture Claude's interactions, tool usage, errors, and progress.

    Attributes:
        id: Unique log identifier (UUID)
        task_run_id: Reference to task run (cascade delete)
        task_run: Relationship to TaskRun model
        level: Log level (debug, info, warning, error, critical)
        message: Log message text
        step: Execution step (init, planning, implementation, testing, etc.)
        tool_name: Claude tool name used (Read, Write, Bash, etc.)
        log_metadata: Additional data as JSONB
        created_at: Log timestamp

    Note:
        The JSONB column is named ``log_metadata`` because ``metadata`` is a
        reserved attribute on SQLAlchemy declarative classes (it resolves to
        ``Base.metadata``, the table MetaData registry). All helper methods
        below therefore read/write ``self.log_metadata``.
    """

    __tablename__ = "task_run_logs"

    # -------------------------------------------------------------------------
    # PRIMARY KEY
    # -------------------------------------------------------------------------
    id: Mapped[uuid.UUID] = mapped_column(
        UUID(as_uuid=True),
        primary_key=True,
        default=uuid.uuid4,
        server_default=func.gen_random_uuid(),
        nullable=False,
    )

    # -------------------------------------------------------------------------
    # RELATIONS
    # -------------------------------------------------------------------------
    task_run_id: Mapped[uuid.UUID] = mapped_column(
        UUID(as_uuid=True),
        ForeignKey("task_runs.id", ondelete="CASCADE"),
        nullable=False,
        index=True,  # idx_task_run_logs_task_run
        comment="Reference to task run (cascade delete)",
    )

    # -------------------------------------------------------------------------
    # LOG DETAILS
    # -------------------------------------------------------------------------
    level: Mapped[str] = mapped_column(
        String(20),
        nullable=False,
        index=True,  # idx_task_run_logs_level
        comment="Log level: debug, info, warning, error, critical",
    )
    message: Mapped[str] = mapped_column(Text, nullable=False, comment="Log message text")

    # -------------------------------------------------------------------------
    # CONTEXT
    # -------------------------------------------------------------------------
    step: Mapped[Optional[str]] = mapped_column(
        String(100),
        nullable=True,
        comment="Execution step: init, planning, implementation, testing, etc.",
    )
    tool_name: Mapped[Optional[str]] = mapped_column(
        String(50), nullable=True, comment="Claude tool name: Read, Write, Edit, Bash, Grep, etc."
    )

    # -------------------------------------------------------------------------
    # ADDITIONAL DATA
    # -------------------------------------------------------------------------
    log_metadata: Mapped[Optional[Dict[str, Any]]] = mapped_column(
        JSONB,
        nullable=True,
        server_default="{}",
        default=dict,
        comment="Additional log metadata as JSON",
    )

    # -------------------------------------------------------------------------
    # TIMESTAMP
    # -------------------------------------------------------------------------
    created_at: Mapped[datetime] = mapped_column(
        DateTime(timezone=True),
        nullable=False,
        server_default=func.current_timestamp(),
        default=func.current_timestamp(),
        index=True,  # idx_task_run_logs_created_at
        comment="Log timestamp",
    )

    # -------------------------------------------------------------------------
    # RELATIONSHIPS
    # -------------------------------------------------------------------------
    # Task run (many-to-one)
    task_run: Mapped["TaskRun"] = relationship(
        "TaskRun",
        back_populates="logs",
        foreign_keys=[task_run_id],
    )

    # -------------------------------------------------------------------------
    # TABLE ARGUMENTS (constraints, indexes)
    # -------------------------------------------------------------------------
    __table_args__ = (
        # Level constraint
        CheckConstraint(
            "level IN ('debug', 'info', 'warning', 'error', 'critical')",
            name="check_level",
        ),
        # Composite index for querying logs by task and time
        Index("idx_task_run_logs_task_run_created", task_run_id, created_at),
        # Table comment
        {"comment": "Detailed task execution logs"},
    )

    # -------------------------------------------------------------------------
    # VALIDATORS
    # -------------------------------------------------------------------------
    @validates("level")
    def validate_level(self, key: str, value: str) -> str:
        """Validate log level is valid.

        Args:
            key: Field name
            value: Log level value

        Returns:
            str: Validated log level

        Raises:
            ValueError: If log level is invalid
        """
        valid_levels = ["debug", "info", "warning", "error", "critical"]
        if value not in valid_levels:
            raise ValueError(f"level must be one of {valid_levels}, got '{value}'")
        return value

    # -------------------------------------------------------------------------
    # METHODS
    # -------------------------------------------------------------------------
    def __repr__(self) -> str:
        """String representation of TaskRunLog."""
        return (
            f"<TaskRunLog(id={self.id}, task_run_id={self.task_run_id}, "
            f"level='{self.level}', step='{self.step}')>"
        )

    def is_error(self) -> bool:
        """Check if log is an error or critical level.

        Returns:
            bool: True if error or critical, False otherwise
        """
        return self.level in ["error", "critical"]

    def add_metadata(self, key: str, value: Any) -> None:
        """Add or update a metadata field.

        Args:
            key: Metadata key
            value: Metadata value (must be JSON-serializable)
        """
        # BUGFIX: previously used ``self.metadata``, which on a declarative
        # class is Base.metadata (a MetaData object), not this column.
        if self.log_metadata is None:
            self.log_metadata = {}
        self.log_metadata[key] = value
        # In-place JSONB mutations are invisible to SQLAlchemy's change
        # tracking, so flag the attribute explicitly for the next flush.
        from sqlalchemy.orm.attributes import flag_modified

        flag_modified(self, "log_metadata")

    def get_metadata(self, key: str, default: Any = None) -> Any:
        """Get a metadata value.

        Args:
            key: Metadata key
            default: Default value if key not found

        Returns:
            Any: Metadata value or default
        """
        # BUGFIX: read from the actual column, not the reserved ``metadata``.
        if not self.log_metadata:
            return default
        return self.log_metadata.get(key, default)

    def to_dict(self) -> dict:
        """Convert log to dictionary for API responses.

        Returns:
            dict: Log data as dictionary
        """
        return {
            "id": str(self.id),
            "task_run_id": str(self.task_run_id),
            "level": self.level,
            "message": self.message,
            "step": self.step,
            "tool_name": self.tool_name,
            # API key stays "metadata"; the value comes from log_metadata.
            "metadata": self.log_metadata or {},
            "created_at": self.created_at.isoformat() if self.created_at else None,
        }

    @staticmethod
    def create_log(
        task_run_id: uuid.UUID,
        level: str,
        message: str,
        step: Optional[str] = None,
        tool_name: Optional[str] = None,
        **metadata: Any,
    ) -> "TaskRunLog":
        """Factory method to create a log entry.

        Args:
            task_run_id: Task run UUID
            level: Log level (debug, info, warning, error, critical)
            message: Log message
            step: Optional execution step
            tool_name: Optional Claude tool name
            **metadata: Additional metadata as keyword arguments

        Returns:
            TaskRunLog: Created log instance

        Example:
            >>> log = TaskRunLog.create_log(
            ...     task_run_id=task.id,
            ...     level="info",
            ...     message="Starting task execution",
            ...     step="init",
            ...     duration_ms=150,
            ...     memory_mb=256,
            ... )
        """
        # BUGFIX: ``metadata=`` is not a column kwarg (and is reserved by
        # SQLAlchemy); the JSONB column is ``log_metadata``.
        return TaskRunLog(
            task_run_id=task_run_id,
            level=level,
            message=message,
            step=step,
            tool_name=tool_name,
            log_metadata=metadata if metadata else {},
        )
| """TaskRunLog model - Detailed task execution logs.
This module defines the TaskRunLog model which stores detailed logs for
task execution, including Claude interactions, tool usage, and progress tracking.
"""
import uuid
from datetime import datetime
from enum import Enum
from typing import Any, Dict, Optional
from sqlalchemy import CheckConstraint, Column, DateTime, ForeignKey, Index, String, Text, func
from sqlalchemy.dialects.postgresql import JSONB, UUID
from sqlalchemy.orm import Mapped, mapped_column, relationship, validates
from lazy_bird.core.database import Base
class LogLevel(str, Enum):
"""Log level enumeration.
Attributes:
DEBUG: Debug-level information
INFO: Informational messages
WARNING: Warning messages
ERROR: Error messages
CRITICAL: Critical error messages
"""
DEBUG = "debug"
INFO = "info"
WARNING = "warning"
ERROR = "error"
CRITICAL = "critical"
class TaskRunLog(Base):
"""TaskRunLog model for detailed task execution logs.
A TaskRunLog represents a single log entry during task execution.
Logs capture Claude's interactions, tool usage, errors, and progress.
Attributes:
id: Unique log identifier (UUID)
task_run_id: Reference to task run (cascade delete)
task_run: Relationship to TaskRun model
level: Log level (debug, info, warning, error, critical)
message: Log message text
step: Execution step (init, planning, implementation, testing, etc.)
tool_name: Claude tool name used (Read, Write, Bash, etc.)
log_metadata | [
"# sqlalchemy/sqlalchemy:lib/sqlalchemy/sql/schema.py\nCheckConstraint",
"# sqlalchemy/sqlalchemy:lib/sqlalchemy/dialects/postgresql/json.py\nJSONB",
"# sqlalchemy/sqlalchemy:lib/sqlalchemy/orm/base.py\nMapped",
"# sqlalchemy/sqlalchemy:lib/sqlalchemy/sql/sqltypes.py\nUUID",
"# sqlalchemy/sqlalchemy:lib/sql... | yusufkaraaslan/lazy-bird | lazy_bird/models/task_run_log.py |
"""TaskRun model - Task execution records.
This module defines the TaskRun model which stores task execution records,
including work item details, execution status, git information, test results,
and resource usage.
"""
import uuid
from datetime import datetime
from decimal import Decimal
from enum import Enum
from typing import Any, Dict, List, Optional
from sqlalchemy import (
Boolean,
CheckConstraint,
Column,
DateTime,
ForeignKey,
Index,
Integer,
Numeric,
String,
Text,
func,
)
from sqlalchemy.dialects.postgresql import JSONB, UUID
from sqlalchemy.orm import Mapped, mapped_column, relationship, validates
from lazy_bird.core.database import Base
class TaskStatus(str, Enum):
    """Lifecycle states of a task execution.

    Members also subclass ``str``, so they compare and serialize as their
    plain string values (e.g. ``TaskStatus.QUEUED == "queued"``).
    """

    QUEUED = "queued"        # waiting to execute
    RUNNING = "running"      # currently executing
    SUCCESS = "success"      # completed successfully
    FAILED = "failed"        # failed with error
    CANCELLED = "cancelled"  # cancelled by user
    TIMEOUT = "timeout"      # exceeded timeout limit
class TaskComplexity(str, Enum):
    """Complexity rating assigned to a task.

    Members also subclass ``str``, so they compare and serialize as their
    plain string values (e.g. ``TaskComplexity.SIMPLE == "simple"``).
    """

    SIMPLE = "simple"    # simple task (UI, config, documentation)
    MEDIUM = "medium"    # medium complexity (features, refactoring)
    COMPLEX = "complex"  # complex task (architecture, systems, optimization)
class TaskRun(Base):
"""TaskRun model for task execution records.
A TaskRun represents a single execution of a development task by Claude.
It tracks the complete lifecycle from queued to completion, including
git operations, test results, PR creation, and resource usage.
Attributes:
id: Unique task run identifier (UUID)
project_id: Reference to project
project: Relationship to Project model
claude_account_id: Reference to Claude account used
claude_account: Relationship to ClaudeAccount model
work_item_id: External work item ID (e.g., "issue-42")
work_item_url: URL to work item on platform
work_item_title: Work item title
work_item_description: Work item description
task_type: Task type (feature, bugfix, refactor)
complexity: Task complexity (simple, medium, complex)
prompt: Prompt sent to Claude
status: Execution status (queued, running, success, failed, cancelled, timeout)
started_at: When task execution started
completed_at: When task execution completed
duration_seconds: Total execution time in seconds
retry_count: Number of retries attempted
max_retries: Maximum retries allowed
branch_name: Git branch created for task
worktree_path: Path to git worktree
commit_sha: Git commit SHA
pr_url: URL to created pull request
pr_number: PR number on platform
tests_passed: Whether tests passed
test_output: Test execution output
error_message: Error message if failed
tokens_used: Total tokens consumed
cost_usd: Total cost in USD
task_metadata: Additional data as JSONB
created_at: Creation timestamp
updated_at: Last update timestamp
logs: Relationship to TaskRunLog models
"""
__tablename__ = "task_runs"
# -------------------------------------------------------------------------
# PRIMARY KEY
# -------------------------------------------------------------------------
id: Mapped[uuid.UUID] = mapped_column(
UUID(as_uuid=True),
primary_key=True,
default=uuid.uuid4,
server_default=func.gen_random_uuid(),
nullable=False,
)
# -------------------------------------------------------------------------
# RELATIONS
# -------------------------------------------------------------------------
project_id: Mapped[uuid.UUID] = mapped_column(
UUID(as_uuid=True),
ForeignKey("projects.id", ondelete="CASCADE"),
nullable=False,
index=True, # idx_task_runs_project
comment="Reference to project (cascade delete)",
)
claude_account_id: Mapped[Optional[uuid.UUID]] = mapped_column(
UUID(as_uuid=True),
ForeignKey("claude_accounts.id", ondelete="SET NULL"),
nullable=True,
comment="Reference to Claude account used for execution",
)
# -------------------------------------------------------------------------
# WORK ITEM IDENTIFICATION
# -------------------------------------------------------------------------
work_item_id: Mapped[str] = mapped_column(
String(255),
nullable=False,
index=True, # idx_task_runs_work_item
comment="External work item ID (e.g., 'issue-42', 'JIRA-123')",
)
work_item_url: Mapped[Optional[str]] = mapped_column(
String(500), nullable=True, comment="URL to work item on platform"
)
work_item_title: Mapped[Optional[str]] = mapped_column(
String(500), nullable=True, comment="Work item title"
)
work_item_description: Mapped[Optional[str]] = mapped_column(
Text, nullable=True, comment="Work item description/body"
)
# -------------------------------------------------------------------------
# TASK DETAILS
# -------------------------------------------------------------------------
task_type: Mapped[str] = mapped_column(
String(50),
nullable=False,
server_default="feature",
default="feature",
comment="Task type: feature, bugfix, refactor, docs, etc.",
)
complexity: Mapped[Optional[str]] = mapped_column(
String(20), nullable=True, comment="Task complexity: simple, medium, complex"
)
prompt: Mapped[str] = mapped_column(
Text, nullable=False, comment="Prompt sent to Claude for task execution"
)
# -------------------------------------------------------------------------
# EXECUTION STATUS
# -------------------------------------------------------------------------
status: Mapped[str] = mapped_column(
String(50),
nullable=False,
server_default="queued",
default="queued",
index=True, # idx_task_runs_status
comment="Execution status: queued, running, success, failed, cancelled, timeout",
)
# -------------------------------------------------------------------------
# PROGRESS TRACKING
# -------------------------------------------------------------------------
started_at: Mapped[Optional[datetime]] = mapped_column(
DateTime(timezone=True), nullable=True, comment="When task execution started"
)
completed_at: Mapped[Optional[datetime]] = mapped_column(
DateTime(timezone=True), nullable=True, comment="When task execution completed"
)
duration_seconds: Mapped[Optional[int]] = mapped_column(
Integer, nullable=True, comment="Total execution time in seconds (auto-calculated)"
)
retry_count: Mapped[int] = mapped_column(
Integer,
nullable=False,
server_default="0",
default=0,
comment="Number of retries attempted",
)
max_retries: Mapped[int] = mapped_column(
Integer, nullable=False, server_default="3", default=3, comment="Maximum retries allowed"
)
# -------------------------------------------------------------------------
# GIT DETAILS
# -------------------------------------------------------------------------
branch_name: Mapped[Optional[str]] = mapped_column(
String(255), nullable=True, comment="Git branch name created for this task"
)
worktree_path: Mapped[Optional[str]] = mapped_column(
String(500), nullable=True, comment="Path to git worktree"
)
commit_sha: Mapped[Optional[str]] = mapped_column(
String(40), nullable=True, comment="Git commit SHA"
)
# -------------------------------------------------------------------------
# RESULTS
# -------------------------------------------------------------------------
pr_url: Mapped[Optional[str]] = mapped_column(
String(500), nullable=True, comment="URL to created pull request"
)
pr_number: Mapped[Optional[int]] = mapped_column(
Integer, nullable=True, comment="Pull request number on platform"
)
tests_passed: Mapped[Optional[bool]] = mapped_column(
Boolean, nullable=True, comment="Whether tests passed (NULL if not run)"
)
test_output: Mapped[Optional[str]] = mapped_column(
Text, nullable=True, comment="Test execution output"
)
error_message: Mapped[Optional[str]] = mapped_column(
Text, nullable=True, comment="Error message if task failed"
)
# -------------------------------------------------------------------------
# RESOURCE USAGE
# -------------------------------------------------------------------------
tokens_used: Mapped[Optional[int]] = mapped_column(
Integer, nullable=True, comment="Total tokens consumed by Claude"
)
cost_usd: Mapped[Optional[Decimal]] = mapped_column(
Numeric(10, 4), nullable=True, comment="Total cost in USD"
)
# -------------------------------------------------------------------------
# METADATA
# -------------------------------------------------------------------------
task_metadata: Mapped[Optional[Dict[str, Any]]] = mapped_column(
JSONB,
nullable=True,
server_default="{}",
default=dict,
comment="Additional task metadata as JSON",
)
created_at: Mapped[datetime] = mapped_column(
DateTime(timezone=True),
nullable=False,
server_default=func.current_timestamp(),
default=func.current_timestamp(),
index=True, # idx_task_runs_created_at (with DESC)
comment="Creation timestamp",
)
updated_at: Mapped[datetime] = mapped_column(
DateTime(timezone=True),
nullable=False,
server_default=func.current_timestamp(),
default=func.current_timestamp(),
onupdate=func.current_timestamp(),
comment="Last update timestamp",
)
# -------------------------------------------------------------------------
# RELATIONSHIPS
# -------------------------------------------------------------------------
# Project (many-to-one)
project: Mapped["Project"] = relationship(
"Project",
back_populates="task_runs",
foreign_keys=[project_id],
lazy="joined", # Eager load project data
)
# Claude account (many-to-one)
claude_account: Mapped[Optional["ClaudeAccount"]] = relationship(
"ClaudeAccount",
back_populates="task_runs",
foreign_keys=[claude_account_id],
lazy="joined", # Eager load account data
)
# Task run logs (one-to-many)
logs: Mapped[List["TaskRunLog"]] = relationship(
"TaskRunLog",
back_populates="task_run",
cascade="all, delete-orphan",
order_by="TaskRunLog.created_at",
lazy="dynamic",
)
# -------------------------------------------------------------------------
# TABLE ARGUMENTS (constraints, indexes)
# -------------------------------------------------------------------------
__table_args__ = (
# Status constraint
CheckConstraint(
"status IN ('queued', 'running', 'success', 'failed', 'cancelled', 'timeout')",
name="check_status",
),
# Complexity constraint
CheckConstraint(
"complexity IS NULL OR complexity IN ('simple', 'medium', 'complex')",
name="check_complexity",
),
# Composite index for common query: project + status
Index("idx_task_runs_project_status", project_id, status),
# Index on created_at DESC for recent tasks
Index("idx_task_runs_created_at", created_at.desc()),
# Table comment
{"comment": "Task execution records and results"},
)
# -------------------------------------------------------------------------
# VALIDATORS
# -------------------------------------------------------------------------
@validates("status")
def validate_status(self, key: str, value: str) -> str:
"""Validate status is valid.
Args:
key: Field name
value: Status value
Returns:
str: Validated status
Raises:
ValueError: If status is invalid
"""
valid_statuses = ["queued", "running", "success", "failed", "cancelled", "timeout"]
if value not in valid_statuses:
raise ValueError(f"status must be one of {valid_statuses}, got '{value}'")
return value
@validates("complexity")
def validate_complexity(self, key: str, value: Optional[str]) -> Optional[str]:
"""Validate complexity is valid.
Args:
key: Field name
value: Complexity value
Returns:
Optional[str]: Validated complexity
Raises:
ValueError: If complexity is invalid
"""
if value is None:
return value
valid_complexities = ["simple", "medium", "complex"]
if value not in valid_complexities:
raise ValueError(f"complexity must be one of {valid_complexities}, got '{value}'")
return value
# -------------------------------------------------------------------------
# METHODS
# -------------------------------------------------------------------------
def __repr__(self) -> str:
"""String representation of TaskRun."""
return (
f"<TaskRun(id={self.id}, work_item='{self.work_item_id}', "
f"status='{self.status}', project_id={self.project_id})>"
)
def is_terminal_status(self) -> bool:
"""Check if task is in terminal status (completed or failed).
Returns:
bool: True if in terminal status, False otherwise
"""
return self.status in ["success", "failed", "cancelled", "timeout"]
def is_running(self) -> bool:
"""Check if task is currently running.
Returns:
bool: True if status is running, False otherwise
"""
return self.status == "running"
def is_queued(self) -> bool:
"""Check if task is queued.
Returns:
bool: True if status is queued, False otherwise
"""
return self.status == "queued"
def can_retry(self) -> bool:
"""Check if task can be retried.
Returns:
bool: True if retry_count < max_retries, False otherwise
"""
return self.retry_count < self.max_retries
def mark_started(self) -> None:
"""Mark task as started (sets started_at and status)."""
self.status = "running"
self.started_at = func.current_timestamp()
def mark_completed(self, success: bool = True, error: Optional[str] = None) -> None:
"""Mark task as completed.
Args:
success: Whether task completed successfully
error: Error message if failed
"""
self.status = "success" if success else "failed"
self.completed_at = func.current_timestamp()
if error:
self.error_message = error
# Calculate duration (will be handled by trigger in database)
if self.started_at and isinstance(self.started_at, datetime):
duration = datetime.now(self.started_at.tzinfo) - self.started_at
self.duration_seconds = int(duration.total_seconds())
def increment_retry(self) -> None:
"""Increment retry count and reset to queued status."""
self.retry_count += 1
self.status = "queued"
self.started_at = None
self.completed_at = None
self.error_message = None
def add_metadata(self, key: str, value: Any) -> None:
"""Add or update a metadata field.
Args:
key: Metadata key
value: Metadata value (must be JSON-serializable)
"""
if self.task_metadata is None:
self.task_metadata = {}
self.task_metadata[key] = value
# Flag as modified for SQLAlchemy JSONB change detection
from sqlalchemy.orm.attributes import flag_modified
from sqlalchemy import inspect
session = inspect(self).session
if session:
flag_modified(self, "task_metadata")
def get_metadata(self, key: str, default: Any = None) -> Any:
"""Get a metadata value.
Args:
key: Metadata key
default: Default value if key not found
Returns:
Any: Metadata value or default
"""
if not self.task_metadata:
return default
return self.task_metadata.get(key, default)
def to_dict(self) -> dict:
"""Convert task run to dictionary for API responses.
Returns:
dict: Task run data as dictionary
"""
return {
"id": str(self.id),
"project_id": str(self.project_id),
"claude_account_id": str(self.claude_account_id) if self.claude_account_id else None,
"work_item_id": self.work_item_id,
"work_item_url": self.work_item_url,
"work_item_title": self.work_item_title,
"task_type": self.task_type,
"complexity": self.complexity,
"status": self.status,
"started_at": self.started_at.isoformat() if self.started_at else None,
"completed_at": self.completed_at.isoformat() if self.completed_at else None,
"duration_seconds": self.duration_seconds,
"retry_count": self.retry_count,
"max_retries": self.max_retries,
"branch_name": self.branch_name,
"commit_sha": self.commit_sha,
"pr_url": self.pr_url,
"pr_number": self.pr_number,
"tests_passed": self.tests_passed,
"tokens_used": self.tokens_used,
"cost_usd": float(self.cost_usd) if self.cost_usd else None,
"task_metadata": self.task_metadata or {},
"created_at": self.created_at.isoformat() if self.created_at else None,
"updated_at": self.updated_at.isoformat() if self.updated_at else None,
}
| """TaskRun model - Task execution records.
This module defines the TaskRun model which stores task execution records,
including work item details, execution status, git information, test results,
and resource usage.
"""
import uuid
from datetime import datetime
from decimal import Decimal
from enum import Enum
from typing import Any, Dict, List, Optional
from sqlalchemy import (
Boolean,
CheckConstraint,
Column,
DateTime,
ForeignKey,
Index,
Integer,
Numeric,
String,
Text,
func,
)
from sqlalchemy.dialects.postgresql import JSONB, UUID
from sqlalchemy.orm import Mapped, mapped_column, relationship, validates
from lazy_bird.core.database import Base
class TaskStatus(str, Enum):
"""Task execution status enumeration.
Attributes:
QUEUED: Task is queued and waiting to execute
RUNNING: Task is currently executing
SUCCESS: Task completed successfully
FAILED: Task failed with error
CANCELLED: Task was cancelled by user
TIMEOUT: Task exceeded timeout limit
"""
QUEUED = "queued"
RUNNING = "running"
SUCCESS = "success"
FAILED = "failed"
CANCELLED = "cancelled"
TIMEOUT = "timeout"
class TaskComplexity(str, Enum):
"""Task complexity level enumeration.
Attributes:
SIMPLE: Simple task (UI, config, documentation)
MEDIUM: Medium complexity (features, refactoring)
COMPLEX: Complex task (architecture, systems, optimization)
| [
"# sqlalchemy/sqlalchemy:lib/sqlalchemy/sql/sqltypes.py\nBoolean",
"# sqlalchemy/sqlalchemy:lib/sqlalchemy/dialects/postgresql/json.py\nJSONB",
"# sqlalchemy/sqlalchemy:lib/sqlalchemy/orm/base.py\nMapped",
"# sqlalchemy/sqlalchemy:lib/sqlalchemy/orm/_orm_constructors.py\nmapped_column",
"# sqlalchemy/sqlalc... | yusufkaraaslan/lazy-bird | lazy_bird/models/task_run.py |
"""Project model - Project configurations and settings.
This module defines the Project model which stores project configurations,
automation settings, integration details, and resource limits.
"""
import uuid
from datetime import datetime
from decimal import Decimal
from typing import List, Optional
from sqlalchemy import (
BigInteger,
Boolean,
Column,
DateTime,
ForeignKey,
Index,
Integer,
Numeric,
String,
Text,
func,
)
from sqlalchemy.dialects.postgresql import TSVECTOR, UUID
from sqlalchemy.orm import Mapped, mapped_column, relationship
from lazy_bird.core.database import Base
class Project(Base):
"""Project model for storing project configurations.
A Project represents a software project being automated by Lazy-Bird.
It contains git repository information, framework configuration,
automation settings, and resource limits.
Attributes:
id: Unique project identifier (UUID)
name: Human-readable project name
slug: URL-safe unique identifier
repo_url: Git repository URL
default_branch: Default git branch (usually 'main' or 'master')
framework_preset_id: Reference to framework preset
framework_preset: Relationship to FrameworkPreset model
project_type: Project type (python, nodejs, rust, godot, etc.)
test_command: Custom test command (overrides preset)
build_command: Custom build command (overrides preset)
lint_command: Custom lint command (overrides preset)
format_command: Custom format command (overrides preset)
automation_enabled: Whether automation is active
ready_state_name: State name for ready tasks (e.g., "Ready")
in_progress_state_name: State name for running tasks
review_state_name: State name for review
done_state_name: State name for completed tasks
max_concurrent_tasks: Maximum parallel task executions
task_timeout_seconds: Task execution timeout
max_cost_per_task_usd: Cost limit per task
daily_cost_limit_usd: Daily total cost limit
github_installation_id: GitHub App installation ID
gitlab_project_id: GitLab project ID
source_platform: Platform type (github, gitlab, plane)
source_platform_url: Platform URL
claude_account_id: Reference to Claude account
claude_account: Relationship to ClaudeAccount model
created_at: Creation timestamp
updated_at: Last update timestamp
deleted_at: Soft delete timestamp (NULL if active)
search_vector: Full-text search vector (auto-generated)
task_runs: Relationship to TaskRun models
webhook_subscriptions: Relationship to WebhookSubscription models
daily_usages: Relationship to DailyUsage models
api_keys: Relationship to ApiKey models
"""
__tablename__ = "projects"
# -------------------------------------------------------------------------
# PRIMARY KEY
# -------------------------------------------------------------------------
id: Mapped[uuid.UUID] = mapped_column(
UUID(as_uuid=True),
primary_key=True,
default=uuid.uuid4,
server_default=func.gen_random_uuid(),
nullable=False,
)
# -------------------------------------------------------------------------
# BASIC INFORMATION
# -------------------------------------------------------------------------
name: Mapped[str] = mapped_column(
String(255), nullable=False, comment="Human-readable project name"
)
slug: Mapped[str] = mapped_column(
String(100),
unique=True,
nullable=False,
index=True, # idx_projects_slug
comment="URL-safe unique identifier",
)
# -------------------------------------------------------------------------
# GIT CONFIGURATION
# -------------------------------------------------------------------------
repo_url: Mapped[str] = mapped_column(
String(500), nullable=False, comment="Git repository URL (GitHub, GitLab, etc.)"
)
default_branch: Mapped[str] = mapped_column(
String(100),
nullable=False,
server_default="main",
default="main",
comment="Default git branch for task execution",
)
# -------------------------------------------------------------------------
# FRAMEWORK CONFIGURATION
# -------------------------------------------------------------------------
framework_preset_id: Mapped[Optional[uuid.UUID]] = mapped_column(
UUID(as_uuid=True),
ForeignKey("framework_presets.id", ondelete="SET NULL"),
nullable=True,
comment="Reference to framework preset (optional)",
)
project_type: Mapped[str] = mapped_column(
String(50), nullable=False, comment="Project type: python, nodejs, rust, godot, etc."
)
# -------------------------------------------------------------------------
# CUSTOM COMMANDS (override preset)
# -------------------------------------------------------------------------
test_command: Mapped[Optional[str]] = mapped_column(
String(500), nullable=True, comment="Custom test command (overrides preset)"
)
build_command: Mapped[Optional[str]] = mapped_column(
String(500), nullable=True, comment="Custom build command (overrides preset)"
)
lint_command: Mapped[Optional[str]] = mapped_column(
String(500), nullable=True, comment="Custom lint command (overrides preset)"
)
format_command: Mapped[Optional[str]] = mapped_column(
String(500), nullable=True, comment="Custom format command (overrides preset)"
)
# -------------------------------------------------------------------------
# AUTOMATION SETTINGS
# -------------------------------------------------------------------------
automation_enabled: Mapped[bool] = mapped_column(
Boolean,
nullable=False,
server_default="false",
default=False,
index=True, # idx_projects_automation_enabled
comment="Whether automation is active for this project",
)
ready_state_name: Mapped[Optional[str]] = mapped_column(
String(100), nullable=True, comment="State name for ready tasks (e.g., 'Ready', 'To Do')"
)
in_progress_state_name: Mapped[str] = mapped_column(
String(100),
nullable=False,
server_default="In Progress",
default="In Progress",
comment="State name for running tasks",
)
review_state_name: Mapped[str] = mapped_column(
String(100),
nullable=False,
server_default="In Review",
default="In Review",
comment="State name for tasks in review",
)
done_state_name: Mapped[str] = mapped_column(
String(100),
nullable=False,
server_default="Done",
default="Done",
comment="State name for completed tasks",
)
# -------------------------------------------------------------------------
# RESOURCE LIMITS
# -------------------------------------------------------------------------
max_concurrent_tasks: Mapped[int] = mapped_column(
Integer,
nullable=False,
server_default="3",
default=3,
comment="Maximum number of parallel task executions",
)
task_timeout_seconds: Mapped[int] = mapped_column(
Integer,
nullable=False,
server_default="1800",
default=1800, # 30 minutes
comment="Task execution timeout in seconds",
)
max_cost_per_task_usd: Mapped[Decimal] = mapped_column(
Numeric(10, 2),
nullable=False,
server_default="5.00",
default=Decimal("5.00"),
comment="Maximum cost per task in USD",
)
daily_cost_limit_usd: Mapped[Decimal] = mapped_column(
Numeric(10, 2),
nullable=False,
server_default="50.00",
default=Decimal("50.00"),
comment="Daily total cost limit in USD",
)
# -------------------------------------------------------------------------
# INTEGRATION SETTINGS
# -------------------------------------------------------------------------
github_installation_id: Mapped[Optional[int]] = mapped_column(
BigInteger, nullable=True, comment="GitHub App installation ID for this project"
)
gitlab_project_id: Mapped[Optional[int]] = mapped_column(
BigInteger, nullable=True, comment="GitLab project ID for this project"
)
source_platform: Mapped[Optional[str]] = mapped_column(
String(50),
nullable=True,
index=True, # idx_projects_source_platform
comment="Source platform: github, gitlab, plane, etc.",
)
source_platform_url: Mapped[Optional[str]] = mapped_column(
String(500), nullable=True, comment="Platform URL for web UI integration"
)
# -------------------------------------------------------------------------
# CLAUDE ACCOUNT
# -------------------------------------------------------------------------
claude_account_id: Mapped[Optional[uuid.UUID]] = mapped_column(
UUID(as_uuid=True),
ForeignKey("claude_accounts.id", ondelete="SET NULL"),
nullable=True,
comment="Reference to Claude API account",
)
# -------------------------------------------------------------------------
# METADATA
# -------------------------------------------------------------------------
created_at: Mapped[datetime] = mapped_column(
DateTime(timezone=True),
nullable=False,
server_default=func.current_timestamp(),
default=func.current_timestamp(),
comment="Creation timestamp",
)
updated_at: Mapped[datetime] = mapped_column(
DateTime(timezone=True),
nullable=False,
server_default=func.current_timestamp(),
default=func.current_timestamp(),
onupdate=func.current_timestamp(),
comment="Last update timestamp",
)
deleted_at: Mapped[Optional[datetime]] = mapped_column(
DateTime(timezone=True), nullable=True, comment="Soft delete timestamp (NULL if active)"
)
# -------------------------------------------------------------------------
# FULL-TEXT SEARCH
# -------------------------------------------------------------------------
search_vector: Mapped[Optional[str]] = mapped_column(
TSVECTOR,
nullable=True,
# Note: In PostgreSQL, this is a GENERATED ALWAYS column
# SQLAlchemy doesn't directly support GENERATED columns in declarative,
# so we'll create this via Alembic migration with raw SQL
comment="Full-text search vector (auto-generated from name + repo_url)",
)
# -------------------------------------------------------------------------
# RELATIONSHIPS
# -------------------------------------------------------------------------
# Framework preset (many-to-one)
framework_preset: Mapped[Optional["FrameworkPreset"]] = relationship(
"FrameworkPreset",
back_populates="projects",
foreign_keys=[framework_preset_id],
lazy="joined", # Eager load preset data
)
# Claude account (many-to-one)
claude_account: Mapped[Optional["ClaudeAccount"]] = relationship(
"ClaudeAccount",
back_populates="projects",
foreign_keys=[claude_account_id],
lazy="joined", # Eager load account data
)
# Task runs (one-to-many)
task_runs: Mapped[List["TaskRun"]] = relationship(
"TaskRun",
back_populates="project",
cascade="all, delete-orphan",
lazy="select",
)
# Webhook subscriptions (one-to-many)
webhook_subscriptions: Mapped[List["WebhookSubscription"]] = relationship(
"WebhookSubscription",
back_populates="project",
cascade="all, delete-orphan",
lazy="dynamic",
)
# Daily usage records (one-to-many)
daily_usages: Mapped[List["DailyUsage"]] = relationship(
"DailyUsage",
back_populates="project",
cascade="all, delete-orphan",
lazy="dynamic",
)
# API keys (one-to-many)
api_keys: Mapped[List["ApiKey"]] = relationship(
"ApiKey",
back_populates="project",
cascade="all, delete-orphan",
lazy="dynamic",
)
# -------------------------------------------------------------------------
# TABLE ARGUMENTS (indexes, constraints)
# -------------------------------------------------------------------------
__table_args__ = (
# Index on slug (already defined via index=True on column)
# Index("idx_projects_slug", "slug"), # Auto-created by index=True
# Index on source_platform
# Index("idx_projects_source_platform", "source_platform"), # Auto-created
# Index on automation_enabled
# Index("idx_projects_automation_enabled", "automation_enabled"), # Auto-created
# GIN index for full-text search (created via Alembic migration)
Index("idx_projects_search", search_vector, postgresql_using="gin"),
# Partial index for active projects (non-deleted)
Index(
"idx_projects_deleted_at",
deleted_at,
postgresql_where=(deleted_at.is_(None)),
),
# Table comment
{"comment": "Project configurations and automation settings"},
)
# -------------------------------------------------------------------------
# METHODS
# -------------------------------------------------------------------------
def __repr__(self) -> str:
"""String representation of Project."""
return (
f"<Project(id={self.id}, slug='{self.slug}', "
f"name='{self.name}', type='{self.project_type}', "
f"enabled={self.automation_enabled})>"
)
@property
def is_active(self) -> bool:
"""Check if project is active (not soft-deleted).
Returns:
bool: True if project is active, False if deleted
"""
return self.deleted_at is None
def get_effective_test_command(self) -> Optional[str]:
"""Get effective test command (custom or from preset).
Returns:
Optional[str]: Test command to use
"""
if self.test_command:
return self.test_command
if self.framework_preset:
return self.framework_preset.test_command
return None
def get_effective_build_command(self) -> Optional[str]:
"""Get effective build command (custom or from preset).
Returns:
Optional[str]: Build command to use
"""
if self.build_command:
return self.build_command
if self.framework_preset:
return self.framework_preset.build_command
return None
def get_effective_lint_command(self) -> Optional[str]:
"""Get effective lint command (custom or from preset).
Returns:
Optional[str]: Lint command to use
"""
if self.lint_command:
return self.lint_command
if self.framework_preset:
return self.framework_preset.lint_command
return None
def get_effective_format_command(self) -> Optional[str]:
"""Get effective format command (custom or from preset).
Returns:
Optional[str]: Format command to use
"""
if self.format_command:
return self.format_command
if self.framework_preset:
return self.framework_preset.format_command
return None
def soft_delete(self) -> None:
"""Soft delete the project by setting deleted_at timestamp."""
self.deleted_at = func.current_timestamp()
self.automation_enabled = False # Disable automation on delete
def restore(self) -> None:
"""Restore a soft-deleted project."""
self.deleted_at = None
| """Project model - Project configurations and settings.
This module defines the Project model which stores project configurations,
automation settings, integration details, and resource limits.
"""
import uuid
from datetime import datetime
from decimal import Decimal
from typing import List, Optional
from sqlalchemy import (
BigInteger,
Boolean,
Column,
DateTime,
ForeignKey,
Index,
Integer,
Numeric,
String,
Text,
func,
)
from sqlalchemy.dialects.postgresql import TSVECTOR, UUID
from sqlalchemy.orm import Mapped, mapped_column, relationship
from lazy_bird.core.database import Base
class Project(Base):
"""Project model for storing project configurations.
A Project represents a software project being automated by Lazy-Bird.
It contains git repository information, framework configuration,
automation settings, and resource limits.
Attributes:
id: Unique project identifier (UUID)
name: Human-readable project name
slug: URL-safe unique identifier
repo_url: Git repository URL
default_branch: Default git branch (usually 'main' or 'master')
framework_preset_id: Reference to framework preset
framework_preset: Relationship to FrameworkPreset model
project_type: Project type (python, nodejs, rust, godot, etc.)
test_command: Custom test command (overrides preset)
build_command: Custom build command (overrides preset)
lint_command: Custom lint command (overrides preset)
format_command: Custom format command (overrides preset)
automation_enabled: Whether automation is active | [
"# sqlalchemy/sqlalchemy:lib/sqlalchemy/sql/sqltypes.py\nBigInteger",
"# sqlalchemy/sqlalchemy:lib/sqlalchemy/dialects/postgresql/types.py\nTSVECTOR",
"# sqlalchemy/sqlalchemy:lib/sqlalchemy/orm/base.py\nMapped",
"# sqlalchemy/sqlalchemy:lib/sqlalchemy/orm/_orm_constructors.py\nmapped_column",
"# sqlalchemy... | yusufkaraaslan/lazy-bird | lazy_bird/models/project.py |
"""FrameworkPreset model - Framework-specific command presets.
This module defines the FrameworkPreset model which stores predefined
configurations for different frameworks (Godot, Django, React, etc.).
"""
import uuid
from datetime import datetime
from typing import Any, Dict, List, Optional
from sqlalchemy import Boolean, Column, DateTime, String, Text, func
from sqlalchemy.dialects.postgresql import JSONB, UUID
from sqlalchemy.orm import Mapped, mapped_column, relationship
from lazy_bird.core.database import Base
class FrameworkPreset(Base):
"""FrameworkPreset model for framework-specific configurations.
A FrameworkPreset defines default commands and settings for a specific
framework (e.g., Godot, Django, React). Lazy-Bird includes built-in
presets for 15+ frameworks, and users can create custom presets.
Attributes:
id: Unique preset identifier (UUID)
name: Internal preset name (unique, lowercase)
display_name: Human-readable display name
description: Preset description
framework_type: Framework category (game_engine, backend, frontend, language)
language: Programming language (gdscript, python, javascript, rust, etc.)
test_command: Command to run tests (required)
build_command: Command to build project (optional)
lint_command: Command to lint code (optional)
format_command: Command to format code (optional)
config_files: JSON object with framework-specific configuration file paths
is_builtin: Whether this is a built-in preset (cannot be deleted)
created_at: Creation timestamp
updated_at: Last update timestamp
projects: Relationship to Project models using this preset
"""
__tablename__ = "framework_presets"
# -------------------------------------------------------------------------
# PRIMARY KEY
# -------------------------------------------------------------------------
id: Mapped[uuid.UUID] = mapped_column(
UUID(as_uuid=True),
primary_key=True,
default=uuid.uuid4,
server_default=func.gen_random_uuid(),
nullable=False,
)
# -------------------------------------------------------------------------
# BASIC INFORMATION
# -------------------------------------------------------------------------
name: Mapped[str] = mapped_column(
String(100), unique=True, nullable=False, comment="Internal preset name (lowercase, unique)"
)
display_name: Mapped[str] = mapped_column(
String(255), nullable=False, comment="Human-readable display name"
)
description: Mapped[Optional[str]] = mapped_column(
Text, nullable=True, comment="Preset description"
)
# -------------------------------------------------------------------------
# FRAMEWORK DETAILS
# -------------------------------------------------------------------------
framework_type: Mapped[str] = mapped_column(
String(50),
nullable=False,
index=True, # idx_framework_presets_type
comment="Framework category: game_engine, backend, frontend, language",
)
language: Mapped[Optional[str]] = mapped_column(
String(50),
nullable=True,
comment="Programming language: gdscript, python, javascript, rust, etc.",
)
# -------------------------------------------------------------------------
# DEFAULT COMMANDS
# -------------------------------------------------------------------------
test_command: Mapped[str] = mapped_column(
String(500), nullable=False, comment="Command to run tests (required)"
)
build_command: Mapped[Optional[str]] = mapped_column(
String(500), nullable=True, comment="Command to build project (optional)"
)
lint_command: Mapped[Optional[str]] = mapped_column(
String(500), nullable=True, comment="Command to lint code (optional)"
)
format_command: Mapped[Optional[str]] = mapped_column(
String(500), nullable=True, comment="Command to format code (optional)"
)
# -------------------------------------------------------------------------
# ADDITIONAL CONFIGURATION
# -------------------------------------------------------------------------
config_files: Mapped[Optional[Dict[str, Any]]] = mapped_column(
JSONB, nullable=True, comment="JSON object with framework-specific config file paths"
)
# -------------------------------------------------------------------------
# METADATA
# -------------------------------------------------------------------------
is_builtin: Mapped[bool] = mapped_column(
Boolean,
nullable=False,
server_default="false",
default=False,
index=True, # idx_framework_presets_builtin
comment="Built-in preset (cannot be deleted)",
)
created_at: Mapped[datetime] = mapped_column(
DateTime(timezone=True),
nullable=False,
server_default=func.current_timestamp(),
default=func.current_timestamp(),
comment="Creation timestamp",
)
updated_at: Mapped[datetime] = mapped_column(
DateTime(timezone=True),
nullable=False,
server_default=func.current_timestamp(),
default=func.current_timestamp(),
onupdate=func.current_timestamp(),
comment="Last update timestamp",
)
# -------------------------------------------------------------------------
# RELATIONSHIPS
# -------------------------------------------------------------------------
# Projects using this preset (one-to-many)
projects: Mapped[List["Project"]] = relationship(
"Project",
back_populates="framework_preset",
foreign_keys="Project.framework_preset_id",
lazy="dynamic", # Return query object for filtering
)
# -------------------------------------------------------------------------
# TABLE ARGUMENTS (indexes, constraints)
# -------------------------------------------------------------------------
__table_args__ = (
# Index on framework_type (auto-created by index=True)
# Index on is_builtin (auto-created by index=True)
# Table comment
{"comment": "Framework-specific command presets and configurations"},
)
# -------------------------------------------------------------------------
# METHODS
# -------------------------------------------------------------------------
def __repr__(self) -> str:
"""String representation of FrameworkPreset."""
return (
f"<FrameworkPreset(id={self.id}, name='{self.name}', "
f"display_name='{self.display_name}', type='{self.framework_type}', "
f"builtin={self.is_builtin})>"
)
def has_build_command(self) -> bool:
"""Check if preset has a build command.
Returns:
bool: True if build_command is set, False otherwise
"""
return self.build_command is not None and self.build_command.strip() != ""
def has_lint_command(self) -> bool:
"""Check if preset has a lint command.
Returns:
bool: True if lint_command is set, False otherwise
"""
return self.lint_command is not None and self.lint_command.strip() != ""
def has_format_command(self) -> bool:
"""Check if preset has a format command.
Returns:
bool: True if format_command is set, False otherwise
"""
return self.format_command is not None and self.format_command.strip() != ""
def get_config_file(self, key: str, default: Optional[str] = None) -> Optional[str]:
"""Get a specific config file path from config_files JSON.
Args:
key: Config file key (e.g., 'godot', 'python', 'package_json')
default: Default value if key not found
Returns:
Optional[str]: Config file path or default
Example:
>>> preset.config_files = {"godot": "project.godot", "python": "pyproject.toml"}
>>> preset.get_config_file("godot")
'project.godot'
>>> preset.get_config_file("missing", "default.txt")
'default.txt'
"""
if not self.config_files:
return default
return self.config_files.get(key, default)
def set_config_file(self, key: str, value: str) -> None:
"""Set a config file path in config_files JSON.
Args:
key: Config file key
value: File path
Example:
>>> preset.set_config_file("godot", "project.godot")
>>> preset.config_files
{"godot": "project.godot"}
"""
if self.config_files is None:
self.config_files = {}
self.config_files[key] = value
# Mark as modified for SQLAlchemy to detect change
# (JSONB columns need special handling)
from sqlalchemy.orm.attributes import flag_modified
from sqlalchemy import inspect
# Only flag if object is in session
session = inspect(self).session
if session:
flag_modified(self, "config_files")
def to_dict(self) -> dict:
"""Convert preset to dictionary for API responses.
Returns:
dict: Preset data as dictionary
Example:
>>> preset.to_dict()
{
'id': '123e4567-e89b-12d3-a456-426614174000',
'name': 'godot',
'display_name': 'Godot Engine 4.x',
'framework_type': 'game_engine',
'test_command': 'godot --headless -s ...',
...
}
"""
return {
"id": str(self.id),
"name": self.name,
"display_name": self.display_name,
"description": self.description,
"framework_type": self.framework_type,
"language": self.language,
"test_command": self.test_command,
"build_command": self.build_command,
"lint_command": self.lint_command,
"format_command": self.format_command,
"config_files": self.config_files or {},
"is_builtin": self.is_builtin,
"created_at": self.created_at.isoformat() if self.created_at else None,
"updated_at": self.updated_at.isoformat() if self.updated_at else None,
}
@staticmethod
def create_builtin_presets() -> List["FrameworkPreset"]:
"""Create list of built-in framework presets.
Returns:
List[FrameworkPreset]: All built-in presets
Example:
>>> from lazy_bird.core import SessionLocal
>>> from lazy_bird.models import FrameworkPreset
>>>
>>> with SessionLocal() as db:
... presets = FrameworkPreset.create_builtin_presets()
... db.add_all(presets)
... db.commit()
"""
return [
# ================================================================
# GAME ENGINES
# ================================================================
FrameworkPreset(
name="godot",
display_name="Godot Engine 4.x",
description="Open-source game engine with GDScript",
framework_type="game_engine",
language="gdscript",
test_command="godot --headless -s addons/gdUnit4/bin/GdUnitCmdTool.gd --test-suite all",
build_command='godot --headless --export-release "Linux/X11" build/game.x86_64',
config_files={"godot": "project.godot"},
is_builtin=True,
),
FrameworkPreset(
name="unity",
display_name="Unity",
description="Popular game engine with C# scripting",
framework_type="game_engine",
language="csharp",
test_command="unity-editor -runTests -batchmode -projectPath .",
build_command="unity-editor -quit -batchmode -projectPath . -buildLinux64Player build/game",
config_files={"unity": "ProjectSettings/ProjectSettings.asset"},
is_builtin=True,
),
FrameworkPreset(
name="bevy",
display_name="Bevy Engine",
description="Rust game engine",
framework_type="game_engine",
language="rust",
test_command="cargo test",
build_command="cargo build --release",
config_files={"cargo": "Cargo.toml"},
is_builtin=True,
),
# ================================================================
# BACKEND FRAMEWORKS
# ================================================================
FrameworkPreset(
name="django",
display_name="Django",
description="Python web framework",
framework_type="backend",
language="python",
test_command="pytest",
build_command="python manage.py collectstatic --noinput",
lint_command="pylint *.py",
format_command="black .",
config_files={"python": "pyproject.toml", "django": "manage.py"},
is_builtin=True,
),
FrameworkPreset(
name="fastapi",
display_name="FastAPI",
description="Modern Python API framework",
framework_type="backend",
language="python",
test_command="pytest",
lint_command="flake8 .",
format_command="black .",
config_files={"python": "pyproject.toml"},
is_builtin=True,
),
FrameworkPreset(
name="flask",
display_name="Flask",
description="Lightweight Python web framework",
framework_type="backend",
language="python",
test_command="pytest",
lint_command="flake8 .",
format_command="black .",
config_files={"python": "pyproject.toml"},
is_builtin=True,
),
FrameworkPreset(
name="rails",
display_name="Ruby on Rails",
description="Ruby web framework",
framework_type="backend",
language="ruby",
test_command="rails test",
lint_command="rubocop",
config_files={"ruby": "Gemfile"},
is_builtin=True,
),
# ================================================================
# FRONTEND FRAMEWORKS
# ================================================================
FrameworkPreset(
name="react",
display_name="React + Vite",
description="React with Vite build tool",
framework_type="frontend",
language="javascript",
test_command="npm test",
build_command="npm run build",
lint_command="npm run lint",
format_command="npm run format",
config_files={"package": "package.json", "vite": "vite.config.js"},
is_builtin=True,
),
FrameworkPreset(
name="vue",
display_name="Vue.js",
description="Progressive JavaScript framework",
framework_type="frontend",
language="javascript",
test_command="npm test",
build_command="npm run build",
lint_command="npm run lint",
config_files={"package": "package.json"},
is_builtin=True,
),
FrameworkPreset(
name="angular",
display_name="Angular",
description="TypeScript-based web framework",
framework_type="frontend",
language="typescript",
test_command="ng test",
build_command="ng build --prod",
lint_command="ng lint",
config_files={"package": "package.json", "angular": "angular.json"},
is_builtin=True,
),
FrameworkPreset(
name="svelte",
display_name="Svelte",
description="Compiler-based JavaScript framework",
framework_type="frontend",
language="javascript",
test_command="npm test",
build_command="npm run build",
lint_command="npm run lint",
config_files={"package": "package.json", "svelte": "svelte.config.js"},
is_builtin=True,
),
# ================================================================
# LANGUAGES (generic)
# ================================================================
FrameworkPreset(
name="python",
display_name="Python (Generic)",
description="Generic Python project",
framework_type="language",
language="python",
test_command="pytest",
lint_command="flake8 .",
format_command="black .",
config_files={"python": "pyproject.toml"},
is_builtin=True,
),
FrameworkPreset(
name="rust",
display_name="Rust",
description="Rust programming language",
framework_type="language",
language="rust",
test_command="cargo test",
build_command="cargo build --release",
lint_command="cargo clippy",
format_command="cargo fmt",
config_files={"cargo": "Cargo.toml"},
is_builtin=True,
),
FrameworkPreset(
name="nodejs",
display_name="Node.js",
description="JavaScript runtime",
framework_type="language",
language="javascript",
test_command="npm test",
lint_command="npm run lint",
format_command="npm run format",
config_files={"package": "package.json"},
is_builtin=True,
),
FrameworkPreset(
name="go",
display_name="Go",
description="Go programming language",
framework_type="language",
language="go",
test_command="go test ./...",
build_command="go build",
lint_command="golangci-lint run",
format_command="go fmt ./...",
config_files={"go": "go.mod"},
is_builtin=True,
),
]
| """FrameworkPreset model - Framework-specific command presets.
This module defines the FrameworkPreset model which stores predefined
configurations for different frameworks (Godot, Django, React, etc.).
"""
import uuid
from datetime import datetime
from typing import Any, Dict, List, Optional
from sqlalchemy import Boolean, Column, DateTime, String, Text, func
from sqlalchemy.dialects.postgresql import JSONB, UUID
from sqlalchemy.orm import Mapped, mapped_column, relationship
from lazy_bird.core.database import Base
class FrameworkPreset(Base):
"""FrameworkPreset model for framework-specific configurations.
A FrameworkPreset defines default commands and settings for a specific
framework (e.g., Godot, Django, React). Lazy-Bird includes built-in
presets for 15+ frameworks, and users can create custom presets.
Attributes:
id: Unique preset identifier (UUID)
name: Internal preset name (unique, lowercase)
display_name: Human-readable display name
description: Preset description
framework_type: Framework category (game_engine, backend, frontend, language)
language: Programming language (gdscript, python, javascript, rust, etc.)
test_command: Command to run tests (required)
build_command: Command to build project (optional)
lint_command: Command to lint code (optional)
format_command: Command to format code (optional)
config_files: JSON object with framework-specific configuration file paths
is_builtin: Whether this is a built-in preset (cannot be deleted)
created_at: Creation timestamp
updated_at: Last update timestamp
projects: Relationship to Project models using this preset | [
"# sqlalchemy/sqlalchemy:lib/sqlalchemy/sql/sqltypes.py\nBoolean",
"# sqlalchemy/sqlalchemy:lib/sqlalchemy/dialects/postgresql/json.py\nJSONB",
"# sqlalchemy/sqlalchemy:lib/sqlalchemy/orm/base.py\nMapped",
"# sqlalchemy/sqlalchemy:lib/sqlalchemy/orm/_orm_constructors.py\nmapped_column"
] | yusufkaraaslan/lazy-bird | lazy_bird/models/framework_preset.py |
"""DailyUsage model - Daily usage tracking and billing.
This module defines the DailyUsage model for tracking daily metrics
and resource consumption per project.
"""
import uuid
from datetime import date, datetime
from decimal import Decimal
from sqlalchemy import (
BigInteger,
Column,
Date,
DateTime,
ForeignKey,
Index,
Integer,
Numeric,
UniqueConstraint,
func,
)
from sqlalchemy.dialects.postgresql import UUID
from sqlalchemy.orm import Mapped, mapped_column, relationship
from lazy_bird.core.database import Base
class DailyUsage(Base):
"""DailyUsage model for daily usage tracking.
Attributes:
id: Unique record identifier
project_id: Reference to project
date: Usage date
tasks_queued: Number of tasks queued
tasks_completed: Number of tasks completed
tasks_failed: Number of tasks failed
total_tokens_used: Total tokens consumed
total_cost_usd: Total cost in USD
total_duration_seconds: Total execution time
created_at: Creation timestamp
updated_at: Last update timestamp
"""
__tablename__ = "daily_usage"
id: Mapped[uuid.UUID] = mapped_column(
UUID(as_uuid=True),
primary_key=True,
default=uuid.uuid4,
server_default=func.gen_random_uuid(),
nullable=False,
)
project_id: Mapped[uuid.UUID] = mapped_column(
UUID(as_uuid=True),
ForeignKey("projects.id", ondelete="CASCADE"),
nullable=False,
)
date: Mapped[date] = mapped_column(
Date,
nullable=False,
)
tasks_queued: Mapped[int] = mapped_column(
Integer,
nullable=False,
server_default="0",
default=0,
)
tasks_completed: Mapped[int] = mapped_column(
Integer,
nullable=False,
server_default="0",
default=0,
)
tasks_failed: Mapped[int] = mapped_column(
Integer,
nullable=False,
server_default="0",
default=0,
)
total_tokens_used: Mapped[int] = mapped_column(
BigInteger,
nullable=False,
server_default="0",
default=0,
)
total_cost_usd: Mapped[Decimal] = mapped_column(
Numeric(10, 4),
nullable=False,
server_default="0",
default=Decimal("0"),
)
total_duration_seconds: Mapped[int] = mapped_column(
BigInteger,
nullable=False,
server_default="0",
default=0,
)
created_at: Mapped[datetime] = mapped_column(
DateTime(timezone=True),
nullable=False,
server_default=func.current_timestamp(),
default=func.current_timestamp(),
)
updated_at: Mapped[datetime] = mapped_column(
DateTime(timezone=True),
nullable=False,
server_default=func.current_timestamp(),
default=func.current_timestamp(),
onupdate=func.current_timestamp(),
)
project: Mapped["Project"] = relationship(
"Project",
back_populates="daily_usages",
foreign_keys=[project_id],
)
__table_args__ = (
UniqueConstraint(project_id, date, name="uq_daily_usage_project_date"),
Index("idx_daily_usage_project_date", project_id, date.desc()),
Index("idx_daily_usage_date", date.desc()),
{"comment": "Daily usage tracking and billing"},
)
def __repr__(self) -> str:
return f"<DailyUsage(project_id={self.project_id}, date={self.date}, completed={self.tasks_completed})>"
| """DailyUsage model - Daily usage tracking and billing.
This module defines the DailyUsage model for tracking daily metrics
and resource consumption per project.
"""
import uuid
from datetime import date, datetime
from decimal import Decimal
from sqlalchemy import (
BigInteger,
Column,
Date,
DateTime,
ForeignKey,
Index,
Integer,
Numeric,
UniqueConstraint,
func,
)
from sqlalchemy.dialects.postgresql import UUID
from sqlalchemy.orm import Mapped, mapped_column, relationship
from lazy_bird.core.database import Base
class DailyUsage(Base):
"""DailyUsage model for daily usage tracking.
Attributes:
id: Unique record identifier
project_id: Reference to project
date: Usage date
tasks_queued: Number of tasks queued
tasks_completed: Number of tasks completed
tasks_failed: Number of tasks failed
total_tokens_used: Total tokens consumed
total_cost_usd: Total cost in USD
total_duration_seconds: Total execution time
created_at: Creation timestamp
updated_at: Last update timestamp
"""
__tablename__ = "daily_usage"
id: Mapped[uuid.UUID] = mapped_column(
UUID(as_uuid=True),
primary_key=True,
default=uuid.uuid4,
server_default=func.gen_random_uuid(),
nullable=False,
)
project_id: | [
"# sqlalchemy/sqlalchemy:lib/sqlalchemy/sql/sqltypes.py\nBigInteger",
"# sqlalchemy/sqlalchemy:lib/sqlalchemy/orm/base.py\nMapped",
"# sqlalchemy/sqlalchemy:lib/sqlalchemy/orm/_orm_constructors.py\nmapped_column",
"# sqlalchemy/sqlalchemy:lib/sqlalchemy/sql/schema.py\nForeignKey"
] | yusufkaraaslan/lazy-bird | lazy_bird/models/daily_usage.py |
"""ClaudeAccount model - Claude API credentials and settings.
This module defines the ClaudeAccount model which stores Claude API credentials,
configuration, and usage limits. Supports both API mode and subscription mode.
"""
import uuid
from datetime import datetime
from decimal import Decimal
from enum import Enum
from typing import List, Optional
from sqlalchemy import (
Boolean,
CheckConstraint,
Column,
DateTime,
Integer,
Numeric,
String,
Text,
func,
)
from sqlalchemy.dialects.postgresql import UUID
from sqlalchemy.orm import Mapped, mapped_column, relationship, validates
from lazy_bird.core.database import Base
class AccountType(str, Enum):
"""Claude account type enumeration.
Attributes:
API: Use Anthropic API with API key
SUBSCRIPTION: Use Claude subscription with session token
"""
API = "api"
SUBSCRIPTION = "subscription"
class ClaudeAccount(Base):
"""ClaudeAccount model for storing Claude API credentials.
A ClaudeAccount represents authentication and configuration for accessing
Claude AI. It supports two modes:
- API mode: Uses Anthropic API key
- Subscription mode: Uses Claude subscription session token
Attributes:
id: Unique account identifier (UUID)
name: Human-readable account name
account_type: Account type (api or subscription)
api_key: API key for API mode (encrypted at application layer)
config_directory: Config directory for subscription mode
session_token: Session token for subscription mode (encrypted)
model: Claude model to use (e.g., claude-sonnet-4-5)
max_tokens: Maximum tokens per request
temperature: Model temperature (0.0-1.0)
monthly_budget_usd: Monthly spending limit
is_active: Whether account is active
created_at: Creation timestamp
updated_at: Last update timestamp
last_used_at: Last time account was used
projects: Relationship to Project models
"""
__tablename__ = "claude_accounts"
# -------------------------------------------------------------------------
# PRIMARY KEY
# -------------------------------------------------------------------------
id: Mapped[uuid.UUID] = mapped_column(
UUID(as_uuid=True),
primary_key=True,
default=uuid.uuid4,
server_default=func.gen_random_uuid(),
nullable=False,
)
# -------------------------------------------------------------------------
# BASIC INFORMATION
# -------------------------------------------------------------------------
name: Mapped[str] = mapped_column(
String(255), nullable=False, comment="Human-readable account name"
)
account_type: Mapped[str] = mapped_column(
String(50),
nullable=False,
index=True, # idx_claude_accounts_type
comment="Account type: api or subscription",
)
# -------------------------------------------------------------------------
# API MODE CREDENTIALS
# -------------------------------------------------------------------------
api_key: Mapped[Optional[str]] = mapped_column(
String(500), nullable=True, comment="Anthropic API key (encrypted at application layer)"
)
# -------------------------------------------------------------------------
# SUBSCRIPTION MODE CREDENTIALS
# -------------------------------------------------------------------------
config_directory: Mapped[Optional[str]] = mapped_column(
String(500), nullable=True, comment="Config directory path for subscription mode"
)
session_token: Mapped[Optional[str]] = mapped_column(
String(500),
nullable=True,
comment="Session token for subscription mode (encrypted at application layer)",
)
# -------------------------------------------------------------------------
# CLAUDE SETTINGS
# -------------------------------------------------------------------------
model: Mapped[str] = mapped_column(
String(100),
nullable=False,
server_default="claude-sonnet-4-5",
default="claude-sonnet-4-5",
comment="Claude model identifier",
)
max_tokens: Mapped[int] = mapped_column(
Integer,
nullable=False,
server_default="8000",
default=8000,
comment="Maximum tokens per request",
)
temperature: Mapped[Decimal] = mapped_column(
Numeric(3, 2),
nullable=False,
server_default="0.7",
default=Decimal("0.7"),
comment="Model temperature (0.00-1.00)",
)
# -------------------------------------------------------------------------
# USAGE LIMITS
# -------------------------------------------------------------------------
monthly_budget_usd: Mapped[Optional[Decimal]] = mapped_column(
Numeric(10, 2), nullable=True, comment="Monthly spending limit in USD"
)
is_active: Mapped[bool] = mapped_column(
Boolean,
nullable=False,
server_default="true",
default=True,
index=True, # idx_claude_accounts_active
comment="Whether account is active and can be used",
)
# -------------------------------------------------------------------------
# METADATA
# -------------------------------------------------------------------------
created_at: Mapped[datetime] = mapped_column(
DateTime(timezone=True),
nullable=False,
server_default=func.current_timestamp(),
default=func.current_timestamp(),
comment="Creation timestamp",
)
updated_at: Mapped[datetime] = mapped_column(
DateTime(timezone=True),
nullable=False,
server_default=func.current_timestamp(),
default=func.current_timestamp(),
onupdate=func.current_timestamp(),
comment="Last update timestamp",
)
last_used_at: Mapped[Optional[datetime]] = mapped_column(
DateTime(timezone=True), nullable=True, comment="Last time this account was used for a task"
)
# -------------------------------------------------------------------------
# RELATIONSHIPS
# -------------------------------------------------------------------------
# Projects using this account (one-to-many)
projects: Mapped[List["Project"]] = relationship(
"Project",
back_populates="claude_account",
foreign_keys="Project.claude_account_id",
lazy="dynamic", # Return query object for filtering
)
# Task runs using this account (one-to-many)
task_runs: Mapped[List["TaskRun"]] = relationship(
"TaskRun",
back_populates="claude_account",
foreign_keys="TaskRun.claude_account_id",
lazy="dynamic",
)
# -------------------------------------------------------------------------
# TABLE ARGUMENTS (constraints, indexes)
# -------------------------------------------------------------------------
__table_args__ = (
# Check constraint: account_type must be 'api' or 'subscription'
CheckConstraint(
"account_type IN ('api', 'subscription')",
name="check_account_type",
),
# Check constraint: API mode requires api_key, subscription mode requires config_directory
CheckConstraint(
"(account_type = 'api' AND api_key IS NOT NULL) OR "
"(account_type = 'subscription' AND config_directory IS NOT NULL)",
name="check_api_key_required",
),
# Check constraint: temperature must be between 0.0 and 1.0
CheckConstraint(
"temperature >= 0.0 AND temperature <= 1.0",
name="check_temperature_range",
),
# Check constraint: max_tokens must be positive
CheckConstraint(
"max_tokens > 0",
name="check_max_tokens_positive",
),
# Check constraint: monthly_budget must be positive if set
CheckConstraint(
"monthly_budget_usd IS NULL OR monthly_budget_usd > 0",
name="check_monthly_budget_positive",
),
# Table comment
{"comment": "Claude API credentials and configuration"},
)
# -------------------------------------------------------------------------
# VALIDATORS
# -------------------------------------------------------------------------
@validates("account_type")
def validate_account_type(self, key: str, value: str) -> str:
"""Validate account_type is valid.
Args:
key: Field name
value: Account type value
Returns:
str: Validated account type
Raises:
ValueError: If account type is invalid
"""
if value not in ("api", "subscription"):
raise ValueError(f"account_type must be 'api' or 'subscription', got '{value}'")
return value
@validates("temperature")
def validate_temperature(self, key: str, value: Decimal) -> Decimal:
"""Validate temperature is in valid range.
Args:
key: Field name
value: Temperature value
Returns:
Decimal: Validated temperature
Raises:
ValueError: If temperature is out of range
"""
if value < 0 or value > 1:
raise ValueError(f"temperature must be between 0.0 and 1.0, got {value}")
return value
@validates("max_tokens")
def validate_max_tokens(self, key: str, value: int) -> int:
"""Validate max_tokens is in valid range.
Args:
key: Field name
value: Max tokens value
Returns:
int: Validated max tokens
Raises:
ValueError: If max_tokens is out of valid range
"""
if value < 1 or value > 200000:
raise ValueError(f"max_tokens must be between 1 and 200000, got {value}")
return value
# -------------------------------------------------------------------------
# METHODS
# -------------------------------------------------------------------------
def __repr__(self) -> str:
"""String representation of ClaudeAccount."""
return (
f"<ClaudeAccount(id={self.id}, name='{self.name}', "
f"type='{self.account_type}', model='{self.model}', "
f"active={self.is_active})>"
)
def is_api_mode(self) -> bool:
"""Check if account is in API mode.
Returns:
bool: True if API mode, False if subscription mode
"""
return self.account_type == "api"
def is_subscription_mode(self) -> bool:
"""Check if account is in subscription mode.
Returns:
bool: True if subscription mode, False if API mode
"""
return self.account_type == "subscription"
def has_budget_limit(self) -> bool:
"""Check if account has a monthly budget limit set.
Returns:
bool: True if monthly budget is set, False otherwise
"""
return self.monthly_budget_usd is not None
def update_last_used(self) -> None:
"""Update the last_used_at timestamp to current time."""
self.last_used_at = func.current_timestamp()
def deactivate(self) -> None:
"""Deactivate the account (prevents further use)."""
self.is_active = False
def activate(self) -> None:
"""Activate the account (allows use)."""
self.is_active = True
def get_api_key_preview(self) -> Optional[str]:
"""Get a safe preview of the API key.
Returns first 8 characters and last 4 characters for identification,
with the middle part masked.
Returns:
Optional[str]: Masked API key preview, or None if not set
Example:
>>> account.api_key = "sk-ant-1234567890abcdefghij"
>>> account.get_api_key_preview()
'sk-ant-1***ghij'
"""
if not self.api_key:
return None
if len(self.api_key) <= 12:
# Key too short to preview safely
return f"{self.api_key[:4]}***"
# Show first 8 and last 4 characters
return f"{self.api_key[:8]}***{self.api_key[-4:]}"
def to_dict(self, include_sensitive: bool = False) -> dict:
"""Convert account to dictionary.
Args:
include_sensitive: Whether to include sensitive fields (api_key, session_token)
Returns:
dict: Account data as dictionary
Example:
>>> account.to_dict()
{
'id': '123e4567-e89b-12d3-a456-426614174000',
'name': 'Production API',
'account_type': 'api',
'model': 'claude-sonnet-4-5',
'is_active': True,
...
}
"""
data = {
"id": str(self.id),
"name": self.name,
"account_type": self.account_type,
"model": self.model,
"max_tokens": self.max_tokens,
"temperature": float(self.temperature) if self.temperature is not None else None,
"monthly_budget_usd": (
float(self.monthly_budget_usd) if self.monthly_budget_usd else None
),
"is_active": self.is_active,
"created_at": self.created_at.isoformat() if self.created_at else None,
"updated_at": self.updated_at.isoformat() if self.updated_at else None,
"last_used_at": self.last_used_at.isoformat() if self.last_used_at else None,
}
if include_sensitive:
data["api_key"] = self.api_key
data["config_directory"] = self.config_directory
data["session_token"] = self.session_token
else:
# Include safe previews only
data["api_key_preview"] = self.get_api_key_preview()
return data
| """ClaudeAccount model - Claude API credentials and settings.
This module defines the ClaudeAccount model which stores Claude API credentials,
configuration, and usage limits. Supports both API mode and subscription mode.
"""
import uuid
from datetime import datetime
from decimal import Decimal
from enum import Enum
from typing import List, Optional
from sqlalchemy import (
Boolean,
CheckConstraint,
Column,
DateTime,
Integer,
Numeric,
String,
Text,
func,
)
from sqlalchemy.dialects.postgresql import UUID
from sqlalchemy.orm import Mapped, mapped_column, relationship, validates
from lazy_bird.core.database import Base
class AccountType(str, Enum):
"""Claude account type enumeration.
Attributes:
API: Use Anthropic API with API key
SUBSCRIPTION: Use Claude subscription with session token
"""
API = "api"
SUBSCRIPTION = "subscription"
class ClaudeAccount(Base):
"""ClaudeAccount model for storing Claude API credentials.
A ClaudeAccount represents authentication and configuration for accessing
Claude AI. It supports two modes:
- API mode: Uses Anthropic API key
- Subscription mode: Uses Claude subscription session token
Attributes:
id: Unique account identifier (UUID)
name: Human-readable account name
account_type: Account type (api or subscription)
api_key: API key for API mode (encrypted at application layer)
config_directory: Config directory for subscription mode
session_token: Session token for subscription mode (encrypted)
model: Claude model to use (e.g., claude-sonnet-4-5)
max | [
"# sqlalchemy/sqlalchemy:lib/sqlalchemy/sql/sqltypes.py\nBoolean",
"# sqlalchemy/sqlalchemy:lib/sqlalchemy/orm/base.py\nMapped",
"# sqlalchemy/sqlalchemy:lib/sqlalchemy/orm/_orm_constructors.py\nmapped_column",
"# sqlalchemy/sqlalchemy:lib/sqlalchemy/sql/schema.py\nCheckConstraint",
"# sqlalchemy/sqlalchemy... | yusufkaraaslan/lazy-bird | lazy_bird/models/claude_account.py |
"""ApiKey model - API authentication tokens.
This module defines the ApiKey model for API authentication and authorization.
"""
import uuid
from datetime import datetime
from typing import List, Optional
from sqlalchemy import (
Boolean,
CheckConstraint,
Column,
DateTime,
ForeignKey,
Index,
String,
func,
text,
)
from sqlalchemy.dialects.postgresql import ARRAY, UUID
from sqlalchemy.orm import Mapped, mapped_column, relationship
from lazy_bird.core.database import Base
class ApiKey(Base):
"""ApiKey model for API authentication.
Attributes:
id: Unique key identifier
key_hash: SHA-256 hash of actual key
key_prefix: First 8 chars for identification
name: Human-readable key name
project_id: Project ID (NULL for organization-level)
scopes: Array of permission scopes
is_active: Whether key is active
expires_at: Expiration timestamp
last_used_at: Last usage timestamp
created_by: Creator identifier
created_at: Creation timestamp
revoked_at: Revocation timestamp
"""
__tablename__ = "api_keys"
id: Mapped[uuid.UUID] = mapped_column(
UUID(as_uuid=True),
primary_key=True,
default=uuid.uuid4,
server_default=func.gen_random_uuid(),
nullable=False,
)
key_hash: Mapped[str] = mapped_column(
String(64), unique=True, nullable=False, index=True, comment="SHA-256 hash of actual key"
)
key_prefix: Mapped[str] = mapped_column(
String(10), nullable=False, index=True, comment="First 8 chars for identification"
)
name: Mapped[str] = mapped_column(
String(255),
nullable=False,
)
project_id: Mapped[Optional[uuid.UUID]] = mapped_column(
UUID(as_uuid=True),
ForeignKey("projects.id", ondelete="CASCADE"),
nullable=True,
index=True,
comment="Project ID (NULL for organization-level)",
)
scopes: Mapped[List[str]] = mapped_column(
ARRAY(String),
nullable=False,
server_default=text("'{read}'"),
default=lambda: ["read"],
)
is_active: Mapped[bool] = mapped_column(
Boolean,
nullable=False,
server_default="true",
default=True,
index=True,
)
expires_at: Mapped[Optional[datetime]] = mapped_column(
DateTime(timezone=True),
nullable=True,
)
last_used_at: Mapped[Optional[datetime]] = mapped_column(
DateTime(timezone=True),
nullable=True,
)
created_by: Mapped[Optional[str]] = mapped_column(
String(255),
nullable=True,
)
created_at: Mapped[datetime] = mapped_column(
DateTime(timezone=True),
nullable=False,
server_default=func.current_timestamp(),
default=func.current_timestamp(),
)
revoked_at: Mapped[Optional[datetime]] = mapped_column(
DateTime(timezone=True),
nullable=True,
)
project: Mapped[Optional["Project"]] = relationship(
"Project",
back_populates="api_keys",
foreign_keys=[project_id],
)
__table_args__ = (
CheckConstraint(
"scopes <@ ARRAY['read', 'write', 'admin']::VARCHAR[]",
name="check_scopes",
),
{"comment": "API authentication tokens"},
)
def __repr__(self) -> str:
return f"<ApiKey(id={self.id}, name='{self.name}', prefix='{self.key_prefix}', active={self.is_active})>"
| """ApiKey model - API authentication tokens.
This module defines the ApiKey model for API authentication and authorization.
"""
import uuid
from datetime import datetime
from typing import List, Optional
from sqlalchemy import (
Boolean,
CheckConstraint,
Column,
DateTime,
ForeignKey,
Index,
String,
func,
text,
)
from sqlalchemy.dialects.postgresql import ARRAY, UUID
from sqlalchemy.orm import Mapped, mapped_column, relationship
from lazy_bird.core.database import Base
class ApiKey(Base):
"""ApiKey model for API authentication.
Attributes:
id: Unique key identifier
key_hash: SHA-256 hash of actual key
key_prefix: First 8 chars for identification
name: Human-readable key name
project_id: Project ID (NULL for organization-level)
scopes: Array of permission scopes
is_active: Whether key is active
expires_at: Expiration timestamp
last_used_at: Last usage timestamp
created_by: Creator identifier
created_at: Creation timestamp
revoked_at: Revocation timestamp
"""
__tablename__ = "api_keys"
id: Mapped[uuid.UUID] = mapped_column(
UUID(as_uuid=True),
primary_key=True,
default=uuid.uuid4,
server_default=func.gen_random_uuid(),
nullable=False,
)
| [
"# sqlalchemy/sqlalchemy:lib/sqlalchemy/sql/sqltypes.py\nBoolean",
"# sqlalchemy/sqlalchemy:lib/sqlalchemy/orm/base.py\nMapped",
"# sqlalchemy/sqlalchemy:lib/sqlalchemy/orm/_orm_constructors.py\nmapped_column",
"# sqlalchemy/sqlalchemy:lib/sqlalchemy/sql/schema.py\nForeignKey",
"# sqlalchemy/sqlalchemy:lib/... | yusufkaraaslan/lazy-bird | lazy_bird/models/api_key.py |
"""Database models for Lazy-Bird.
This package contains all SQLAlchemy ORM models for the application.
All models inherit from Base and are registered with Alembic for migrations.
Models:
- Project: Project configurations and settings
- ClaudeAccount: Claude API credentials and settings
- FrameworkPreset: Framework-specific command presets
- TaskRun: Task execution records
- TaskRunLog: Detailed execution logs
- WebhookSubscription: Webhook endpoint registrations
- DailyUsage: Daily usage tracking and billing
- ApiKey: API authentication tokens
"""
from lazy_bird.models.api_key import ApiKey
from lazy_bird.models.claude_account import ClaudeAccount
from lazy_bird.models.daily_usage import DailyUsage
from lazy_bird.models.framework_preset import FrameworkPreset
from lazy_bird.models.project import Project
from lazy_bird.models.task_run import TaskRun
from lazy_bird.models.task_run_log import TaskRunLog
from lazy_bird.models.user import User
from lazy_bird.models.webhook_subscription import WebhookSubscription
__all__ = [
"Project",
"ClaudeAccount",
"FrameworkPreset",
"TaskRun",
"TaskRunLog",
"WebhookSubscription",
"DailyUsage",
"ApiKey",
"User",
]
| """Database models for Lazy-Bird.
This package contains all SQLAlchemy ORM models for the application.
All models inherit from Base and are registered with Alembic for migrations.
Models:
- Project: Project configurations and settings
- ClaudeAccount: Claude API credentials and settings
- FrameworkPreset: Framework-specific command presets
- TaskRun: Task execution records
- TaskRunLog: Detailed execution logs
- WebhookSubscription: Webhook endpoint registrations
- DailyUsage: Daily usage tracking and billing
- ApiKey: API authentication tokens
"""
from lazy_bird.models.api_key import ApiKey
from lazy_bird.models.claude_account import ClaudeAccount
from lazy_bird.models.daily_usage import DailyUsage
from lazy_bird.models.framework_preset import FrameworkPreset
from lazy_bird.models.project import Project
from lazy_bird.models.task_run import TaskRun
from lazy_bird.models.task_run_log import TaskRunLog
from lazy_bird.models.user import User
from lazy_bird.models.webhook_subscription import WebhookSubscription
__all__ = [
"Project",
"ClaudeAccount",
"FrameworkPreset",
"TaskRun",
"TaskRunLog",
"WebhookSubscription",
"DailyUsage",
"ApiKey",
"User",
]
| [
"# yusufkaraaslan/lazy-bird:lazy_bird/models/api_key.py\nApiKey",
"# yusufkaraaslan/lazy-bird:lazy_bird/models/claude_account.py\nClaudeAccount",
"# yusufkaraaslan/lazy-bird:lazy_bird/models/daily_usage.py\nDailyUsage",
"# yusufkaraaslan/lazy-bird:lazy_bird/models/framework_preset.py\nFrameworkPreset",
"# y... | yusufkaraaslan/lazy-bird | lazy_bird/models/__init__.py |
"""Security utilities for authentication and authorization.
This module provides utilities for:
- API key generation and verification
- JWT token creation and verification
- Password hashing and verification
"""
import hashlib
import secrets
from datetime import datetime, timedelta
from typing import Any, Dict, Optional
import bcrypt
from jose import JWTError, jwt
from lazy_bird.core.config import settings
# JWT algorithm
ALGORITHM = "HS256"
def generate_api_key(prefix: str = "lb", length: int = 32) -> str:
"""Generate a secure random API key.
Args:
prefix: Key prefix for identification (default: "lb")
length: Length of random portion in bytes (default: 32)
Returns:
str: API key in format "prefix_randomstring"
Example:
>>> key = generate_api_key()
>>> key.startswith("lb_")
True
>>> len(key)
67 # "lb_" (3) + 64 hex chars (32 bytes * 2)
"""
random_bytes = secrets.token_bytes(length)
random_string = random_bytes.hex()
return f"{prefix}_{random_string}"
def hash_api_key(api_key: str) -> str:
"""Hash an API key using SHA-256.
Args:
api_key: Raw API key to hash
Returns:
str: Hexadecimal hash of the API key
Example:
>>> key = "lb_abc123"
>>> hashed = hash_api_key(key)
>>> len(hashed)
64 # SHA-256 produces 64 hex characters
"""
return hashlib.sha256(api_key.encode()).hexdigest()
def verify_api_key(raw_key: str, hashed_key: str) -> bool:
"""Verify an API key against its hash.
Args:
raw_key: Raw API key to verify
hashed_key: Stored hash to compare against
Returns:
bool: True if key matches hash, False otherwise
Example:
>>> key = "lb_abc123"
>>> hashed = hash_api_key(key)
>>> verify_api_key(key, hashed)
True
>>> verify_api_key("wrong_key", hashed)
False
"""
return hash_api_key(raw_key) == hashed_key
def get_api_key_prefix(api_key: str) -> str:
"""Extract the prefix from an API key (first 8 characters).
Args:
api_key: Full API key
Returns:
str: First 8 characters for display/identification
Example:
>>> key = "lb_abc123def456"
>>> get_api_key_prefix(key)
'lb_abc12'
"""
return api_key[:8] if len(api_key) >= 8 else api_key
def hash_password(password: str) -> str:
"""Hash a password using bcrypt.
Args:
password: Plain text password
Returns:
str: Bcrypt hash of the password
Example:
>>> hashed = hash_password("mypassword")
>>> hashed.startswith("$2b$")
True
"""
# bcrypt has a 72-byte limit
pw_bytes = password[:72].encode("utf-8")
return bcrypt.hashpw(pw_bytes, bcrypt.gensalt()).decode("utf-8")
def verify_password(plain_password: str, hashed_password: str) -> bool:
"""Verify a password against its hash.
Args:
plain_password: Plain text password to verify
hashed_password: Stored bcrypt hash
Returns:
bool: True if password matches hash, False otherwise
Example:
>>> hashed = hash_password("mypassword")
>>> verify_password("mypassword", hashed)
True
>>> verify_password("wrongpassword", hashed)
False
"""
pw_bytes = plain_password[:72].encode("utf-8")
return bcrypt.checkpw(pw_bytes, hashed_password.encode("utf-8"))
def create_access_token(
data: Dict[str, Any],
expires_delta: Optional[timedelta] = None,
) -> str:
"""Create a JWT access token.
Args:
data: Dictionary of claims to encode in the token
expires_delta: Token expiration time (default: from settings)
Returns:
str: Encoded JWT token
Example:
>>> token = create_access_token({"sub": "user123"})
>>> len(token) > 0
True
>>> "." in token # JWT has 3 parts separated by dots
True
"""
to_encode = data.copy()
# Set expiration time
if expires_delta:
expire = datetime.utcnow() + expires_delta
else:
expire = datetime.utcnow() + timedelta(minutes=settings.JWT_ACCESS_TOKEN_EXPIRE_MINUTES)
to_encode.update({"exp": expire})
# Encode token
encoded_jwt = jwt.encode(
to_encode,
settings.JWT_SECRET_KEY,
algorithm=ALGORITHM,
)
return encoded_jwt
def verify_token(token: str) -> Optional[Dict[str, Any]]:
"""Verify and decode a JWT token.
Args:
token: JWT token string
Returns:
Optional[Dict[str, Any]]: Decoded token payload if valid, None otherwise
Example:
>>> token = create_access_token({"sub": "user123", "email": "user@example.com"})
>>> payload = verify_token(token)
>>> payload is not None
True
>>> payload["sub"]
'user123'
>>> verify_token("invalid_token") is None
True
"""
try:
payload = jwt.decode(
token,
settings.JWT_SECRET_KEY,
algorithms=[ALGORITHM],
)
return payload
except JWTError:
return None
def create_refresh_token(
data: Dict[str, Any],
expires_delta: Optional[timedelta] = None,
) -> str:
"""Create a JWT refresh token with longer expiration.
Args:
data: Dictionary of claims to encode in the token
expires_delta: Token expiration time (default: from settings)
Returns:
str: Encoded JWT refresh token
Example:
>>> token = create_refresh_token({"sub": "user123"})
>>> len(token) > 0
True
"""
to_encode = data.copy()
# Set expiration time (longer than access token)
if expires_delta:
expire = datetime.utcnow() + expires_delta
else:
expire = datetime.utcnow() + timedelta(minutes=settings.JWT_REFRESH_TOKEN_EXPIRE_MINUTES)
to_encode.update({"exp": expire, "type": "refresh"})
# Encode token
encoded_jwt = jwt.encode(
to_encode,
settings.JWT_SECRET_KEY,
algorithm=ALGORITHM,
)
return encoded_jwt
def verify_refresh_token(token: str) -> Optional[Dict[str, Any]]:
"""Verify and decode a JWT refresh token.
Args:
token: JWT refresh token string
Returns:
Optional[Dict[str, Any]]: Decoded token payload if valid and is refresh token, None otherwise
Example:
>>> token = create_refresh_token({"sub": "user123"})
>>> payload = verify_refresh_token(token)
>>> payload is not None
True
>>> payload["type"]
'refresh'
"""
payload = verify_token(token)
# Verify it's a refresh token
if payload and payload.get("type") == "refresh":
return payload
return None
def generate_secure_random_string(length: int = 32) -> str:
"""Generate a cryptographically secure random string.
Args:
length: Length of the string in characters (default: 32)
Returns:
str: URL-safe random string
Example:
>>> random_str = generate_secure_random_string()
>>> len(random_str)
32
>>> random_str.isalnum() or '_' in random_str or '-' in random_str
True
"""
# Generate enough bytes to get desired character length after base64 encoding
# Base64 encoding produces 4 characters for every 3 bytes
# So we need (length * 3 / 4) bytes, rounded up
import math
num_bytes = math.ceil(length * 3 / 4)
# Generate and truncate to exact length
return secrets.token_urlsafe(num_bytes)[:length]
def constant_time_compare(val1: str, val2: str) -> bool:
"""Compare two strings in constant time to prevent timing attacks.
Args:
val1: First string
val2: Second string
Returns:
bool: True if strings are equal, False otherwise
Example:
>>> constant_time_compare("secret", "secret")
True
>>> constant_time_compare("secret", "wrong")
False
"""
return secrets.compare_digest(val1.encode(), val2.encode())
| """Security utilities for authentication and authorization.
This module provides utilities for:
- API key generation and verification
- JWT token creation and verification
- Password hashing and verification
"""
import hashlib
import secrets
from datetime import datetime, timedelta
from typing import Any, Dict, Optional
import bcrypt
from jose import JWTError, jwt
from lazy_bird.core.config import settings
# JWT algorithm
ALGORITHM = "HS256"
def generate_api_key(prefix: str = "lb", length: int = 32) -> str:
"""Generate a secure random API key.
Args:
prefix: Key prefix for identification (default: "lb")
length: Length of random portion in bytes (default: 32)
Returns:
str: API key in format "prefix_randomstring"
Example:
>>> key = generate_api_key()
>>> key.startswith("lb_")
True
>>> len(key)
67 # "lb_" (3) + 64 hex chars (32 bytes * 2)
"""
random_bytes = secrets.token_bytes(length)
random_string = random_bytes.hex()
return f"{prefix}_{random_string}"
def hash_api_key(api_key: str) -> str:
"""Hash an API key using SHA-256.
Args:
api_key: Raw API key to hash
Returns:
str: Hexadecimal hash of the API key
Example:
>>> key = "lb_abc123"
>>> hashed = hash_api_key(key)
>>> len(hashed)
64 | [] | yusufkaraaslan/lazy-bird | lazy_bird/core/security.py |
"""Redis client utilities for caching and Celery.
This module provides Redis connection management for:
- Celery message broker
- Application caching
- Session storage
"""
import logging
from typing import Optional
import redis
from redis.asyncio import Redis as AsyncRedis
from redis.exceptions import ConnectionError, RedisError
from lazy_bird.core.config import settings
logger = logging.getLogger(__name__)
# Synchronous Redis client
_redis_client: Optional[redis.Redis] = None
# Asynchronous Redis client
_async_redis_client: Optional[AsyncRedis] = None
def get_redis() -> redis.Redis:
"""Get or create synchronous Redis client.
Returns:
redis.Redis: Redis client instance
Raises:
ConnectionError: If unable to connect to Redis
Example:
>>> r = get_redis()
>>> r.set("key", "value")
>>> r.get("key")
b'value'
"""
global _redis_client
if _redis_client is None:
_redis_client = redis.from_url(
settings.REDIS_URL,
encoding="utf-8",
decode_responses=True,
max_connections=settings.REDIS_MAX_CONNECTIONS,
socket_timeout=settings.REDIS_SOCKET_TIMEOUT,
socket_connect_timeout=settings.REDIS_SOCKET_TIMEOUT,
)
logger.info("Redis client initialized", extra={"extra_fields": {"url": settings.REDIS_URL}})
return _redis_client
async def get_async_redis() -> AsyncRedis:
"""Get or create asynchronous Redis client.
Returns:
AsyncRedis: Async Redis client instance
Raises:
ConnectionError: If unable to connect to Redis
Example:
>>> r = await get_async_redis()
>>> await r.set("key", "value")
>>> await r.get("key")
'value'
"""
global _async_redis_client
if _async_redis_client is None:
_async_redis_client = await AsyncRedis.from_url(
settings.REDIS_URL,
encoding="utf-8",
decode_responses=True,
max_connections=settings.REDIS_MAX_CONNECTIONS,
socket_timeout=settings.REDIS_SOCKET_TIMEOUT,
socket_connect_timeout=settings.REDIS_SOCKET_TIMEOUT,
)
logger.info("Async Redis client initialized")
return _async_redis_client
def check_redis_connection() -> bool:
"""Check if Redis connection is working.
Returns:
bool: True if connected, False otherwise
Example:
>>> check_redis_connection()
True
"""
try:
r = get_redis()
r.ping()
logger.info("Redis connection successful")
return True
except (ConnectionError, RedisError) as e:
logger.error(f"Redis connection failed: {e}")
return False
async def check_async_redis_connection() -> bool:
"""Check if async Redis connection is working.
Returns:
bool: True if connected, False otherwise
Example:
>>> await check_async_redis_connection()
True
"""
try:
r = await get_async_redis()
await r.ping()
logger.info("Async Redis connection successful")
return True
except (ConnectionError, RedisError) as e:
logger.error(f"Async Redis connection failed: {e}")
return False
def close_redis() -> None:
"""Close synchronous Redis connection.
Example:
>>> close_redis()
"""
global _redis_client
if _redis_client is not None:
_redis_client.close()
_redis_client = None
logger.info("Redis client closed")
async def close_async_redis() -> None:
"""Close asynchronous Redis connection.
Example:
>>> await close_async_redis()
"""
global _async_redis_client
if _async_redis_client is not None:
await _async_redis_client.close()
_async_redis_client = None
logger.info("Async Redis client closed")
class RedisCache:
"""Simple Redis cache wrapper with common operations."""
def __init__(self, prefix: str = "lazy_bird:", ttl: int = 3600):
"""Initialize cache wrapper.
Args:
prefix: Key prefix for namespacing (default: "lazy_bird:")
ttl: Default time-to-live in seconds (default: 3600 = 1 hour)
"""
self.prefix = prefix
self.ttl = ttl
self.redis = get_redis()
def _make_key(self, key: str) -> str:
"""Create prefixed key.
Args:
key: Raw key
Returns:
str: Prefixed key
"""
return f"{self.prefix}{key}"
def get(self, key: str) -> Optional[str]:
"""Get value from cache.
Args:
key: Cache key
Returns:
Optional[str]: Cached value or None if not found
Example:
>>> cache = RedisCache()
>>> cache.get("user:123")
"""
try:
return self.redis.get(self._make_key(key))
except RedisError as e:
logger.error(f"Cache get failed for key {key}: {e}")
return None
def set(self, key: str, value: str, ttl: Optional[int] = None) -> bool:
"""Set value in cache.
Args:
key: Cache key
value: Value to cache
ttl: Time-to-live in seconds (default: use instance ttl)
Returns:
bool: True if successful, False otherwise
Example:
>>> cache = RedisCache()
>>> cache.set("user:123", "John Doe", ttl=300)
"""
try:
expiry = ttl if ttl is not None else self.ttl
self.redis.setex(self._make_key(key), expiry, value)
return True
except RedisError as e:
logger.error(f"Cache set failed for key {key}: {e}")
return False
def delete(self, key: str) -> bool:
"""Delete value from cache.
Args:
key: Cache key
Returns:
bool: True if deleted, False otherwise
Example:
>>> cache = RedisCache()
>>> cache.delete("user:123")
"""
try:
self.redis.delete(self._make_key(key))
return True
except RedisError as e:
logger.error(f"Cache delete failed for key {key}: {e}")
return False
def exists(self, key: str) -> bool:
"""Check if key exists in cache.
Args:
key: Cache key
Returns:
bool: True if exists, False otherwise
Example:
>>> cache = RedisCache()
>>> cache.exists("user:123")
"""
try:
return self.redis.exists(self._make_key(key)) > 0
except RedisError as e:
logger.error(f"Cache exists check failed for key {key}: {e}")
return False
def flush(self, pattern: Optional[str] = None) -> int:
"""Flush cache keys matching pattern.
Args:
pattern: Key pattern (default: flush all keys with prefix)
Returns:
int: Number of keys deleted
Example:
>>> cache = RedisCache()
>>> cache.flush("user:*") # Delete all user keys
"""
try:
search_pattern = self._make_key(pattern if pattern else "*")
keys = self.redis.keys(search_pattern)
if keys:
return self.redis.delete(*keys)
return 0
except RedisError as e:
logger.error(f"Cache flush failed: {e}")
return 0
# Convenience function for FastAPI dependency injection
def get_redis_client() -> redis.Redis:
"""FastAPI dependency for Redis client.
Yields:
redis.Redis: Redis client instance
Example:
>>> from fastapi import Depends
>>> @app.get("/")
>>> def endpoint(redis: redis.Redis = Depends(get_redis_client)):
>>> redis.set("key", "value")
"""
return get_redis()
# Convenience function for async FastAPI dependency injection
async def get_async_redis_client() -> AsyncRedis:
"""FastAPI dependency for async Redis client.
Yields:
AsyncRedis: Async Redis client instance
Example:
>>> from fastapi import Depends
>>> @app.get("/")
>>> async def endpoint(redis: AsyncRedis = Depends(get_async_redis_client)):
>>> await redis.set("key", "value")
"""
return await get_async_redis()
| """Redis client utilities for caching and Celery.
This module provides Redis connection management for:
- Celery message broker
- Application caching
- Session storage
"""
import logging
from typing import Optional
import redis
from redis.asyncio import Redis as AsyncRedis
from redis.exceptions import ConnectionError, RedisError
from lazy_bird.core.config import settings
logger = logging.getLogger(__name__)
# Synchronous Redis client
_redis_client: Optional[redis.Redis] = None
# Asynchronous Redis client
_async_redis_client: Optional[AsyncRedis] = None
def get_redis() -> redis.Redis:
"""Get or create synchronous Redis client.
Returns:
redis.Redis: Redis client instance
Raises:
ConnectionError: If unable to connect to Redis
Example:
>>> r = get_redis()
>>> r.set("key", "value")
>>> r.get("key")
b'value'
"""
global _redis_client
if _redis_client is None:
_redis_client = redis.from_url(
settings.REDIS_URL,
encoding="utf-8",
decode_responses=True,
max_connections=settings.REDIS_MAX_CONNECTIONS,
socket_timeout=settings.REDIS_SOCKET_TIMEOUT,
socket_connect_timeout=settings.REDIS_SOCKET_TIMEOUT,
)
| [
"# redis/redis-py:redis/client.py\nRedis",
"# redis/redis-py:redis/exceptions.py\nConnectionError"
] | yusufkaraaslan/lazy-bird | lazy_bird/core/redis.py |
"""Structured logging configuration for Lazy-Bird.
This module sets up structured logging with JSON formatting, correlation IDs,
and both console and file handlers.
"""
import logging
import logging.handlers
import sys
from contextvars import ContextVar
from datetime import datetime
from pathlib import Path
from typing import Any, Dict, Optional
from lazy_bird.core.config import settings
# Context variable for request correlation ID
correlation_id_var: ContextVar[Optional[str]] = ContextVar("correlation_id", default=None)
class JSONFormatter(logging.Formatter):
"""JSON log formatter with correlation ID support.
Formats log records as JSON for structured logging.
Includes correlation ID from context if available.
"""
def format(self, record: logging.LogRecord) -> str:
"""Format log record as JSON.
Args:
record: Log record to format
Returns:
str: JSON-formatted log entry
"""
import json
# Base log data
log_data: Dict[str, Any] = {
"timestamp": datetime.utcnow().isoformat() + "Z",
"level": record.levelname,
"logger": record.name,
"message": record.getMessage(),
}
# Add correlation ID if available
correlation_id = correlation_id_var.get()
if correlation_id:
log_data["correlation_id"] = correlation_id
# Add exception info if present
if record.exc_info:
log_data["exception"] = self.formatException(record.exc_info)
# Add extra fields from record
if hasattr(record, "extra_fields"):
log_data.update(record.extra_fields)
# Add file/function/line info for debugging
if settings.DEBUG:
log_data["file"] = record.filename
log_data["function"] = record.funcName
log_data["line"] = record.lineno
return json.dumps(log_data, default=str)
class TextFormatter(logging.Formatter):
"""Human-readable text formatter with correlation ID support.
Formats log records as colored text for console output.
"""
# ANSI color codes
COLORS = {
"DEBUG": "\033[36m", # Cyan
"INFO": "\033[32m", # Green
"WARNING": "\033[33m", # Yellow
"ERROR": "\033[31m", # Red
"CRITICAL": "\033[35m", # Magenta
"RESET": "\033[0m", # Reset
}
def format(self, record: logging.LogRecord) -> str:
"""Format log record as colored text.
Args:
record: Log record to format
Returns:
str: Formatted log entry with color codes
"""
# Get color for log level
color = self.COLORS.get(record.levelname, self.COLORS["RESET"])
reset = self.COLORS["RESET"]
# Base format
timestamp = datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S")
level = f"{color}{record.levelname:8}{reset}"
logger = f"{record.name:20}"
message = record.getMessage()
# Add correlation ID if available
correlation_id = correlation_id_var.get()
if correlation_id:
log_str = f"{timestamp} | {level} | {logger} | [{correlation_id[:8]}] | {message}"
else:
log_str = f"{timestamp} | {level} | {logger} | {message}"
# Add exception info if present
if record.exc_info:
log_str += "\n" + self.formatException(record.exc_info)
return log_str
def setup_logging() -> None:
"""Configure application logging.
Sets up logging based on settings:
- Log level from settings.LOG_LEVEL
- Format from settings.LOG_FORMAT (json or text)
- Optional file logging from settings.LOG_FILE
- Console logging to stderr
This should be called once at application startup.
"""
# Get root logger
root_logger = logging.getLogger()
root_logger.setLevel(getattr(logging, settings.LOG_LEVEL))
# Remove existing handlers
root_logger.handlers.clear()
# Choose formatter based on settings
if settings.LOG_FORMAT == "json":
formatter = JSONFormatter()
else:
formatter = TextFormatter()
# Console handler (stderr)
console_handler = logging.StreamHandler(sys.stderr)
console_handler.setLevel(getattr(logging, settings.LOG_LEVEL))
console_handler.setFormatter(formatter)
root_logger.addHandler(console_handler)
# File handler (if configured)
if settings.LOG_FILE:
log_file = Path(settings.LOG_FILE)
log_file.parent.mkdir(parents=True, exist_ok=True)
# Use rotating file handler (10MB max, 5 backups)
file_handler = logging.handlers.RotatingFileHandler(
filename=log_file,
maxBytes=10 * 1024 * 1024, # 10 MB
backupCount=5,
encoding="utf-8",
)
file_handler.setLevel(getattr(logging, settings.LOG_LEVEL))
file_handler.setFormatter(JSONFormatter()) # Always use JSON for files
root_logger.addHandler(file_handler)
# Configure third-party library logging
logging.getLogger("uvicorn").setLevel(logging.INFO)
logging.getLogger("uvicorn.access").setLevel(logging.WARNING)
logging.getLogger("sqlalchemy").setLevel(logging.WARNING)
logging.getLogger("alembic").setLevel(logging.INFO)
# Log startup message
logger = logging.getLogger(__name__)
logger.info(
"Logging configured",
extra={
"extra_fields": {
"log_level": settings.LOG_LEVEL,
"log_format": settings.LOG_FORMAT,
"log_file": str(settings.LOG_FILE) if settings.LOG_FILE else None,
}
},
)
def set_correlation_id(correlation_id: str) -> None:
"""Set correlation ID for current context.
Args:
correlation_id: Unique request/task identifier
Example:
>>> set_correlation_id("req-abc123")
>>> logger.info("Processing request")
# Log will include correlation_id: req-abc123
"""
correlation_id_var.set(correlation_id)
def clear_correlation_id() -> None:
"""Clear correlation ID from current context."""
correlation_id_var.set(None)
def get_correlation_id() -> Optional[str]:
"""Get current correlation ID.
Returns:
Optional[str]: Correlation ID or None if not set
"""
return correlation_id_var.get()
def get_logger(name: str) -> logging.Logger:
"""Get a logger instance for a module.
Args:
name: Logger name (usually __name__)
Returns:
logging.Logger: Configured logger instance
Example:
>>> logger = get_logger(__name__)
>>> logger.info("Operation completed", extra={"extra_fields": {"user_id": 123}})
"""
return logging.getLogger(name)
# Convenience function for logging with extra fields
def log_with_context(
logger: logging.Logger,
level: int,
message: str,
**extra_fields: Any,
) -> None:
"""Log message with extra fields.
Args:
logger: Logger instance
level: Log level (logging.INFO, logging.ERROR, etc.)
message: Log message
**extra_fields: Additional fields to include in log
Example:
>>> logger = get_logger(__name__)
>>> log_with_context(logger, logging.INFO, "Task completed", task_id="abc", duration=1.5)
"""
logger.log(level, message, extra={"extra_fields": extra_fields})
| """Structured logging configuration for Lazy-Bird.
This module sets up structured logging with JSON formatting, correlation IDs,
and both console and file handlers.
"""
import logging
import logging.handlers
import sys
from contextvars import ContextVar
from datetime import datetime
from pathlib import Path
from typing import Any, Dict, Optional
from lazy_bird.core.config import settings
# Context variable for request correlation ID
correlation_id_var: ContextVar[Optional[str]] = ContextVar("correlation_id", default=None)
class JSONFormatter(logging.Formatter):
"""JSON log formatter with correlation ID support.
Formats log records as JSON for structured logging.
Includes correlation ID from context if available.
"""
def format(self, record: logging.LogRecord) -> str:
"""Format log record as JSON.
Args:
record: Log record to format
Returns:
str: JSON-formatted log entry
"""
import json
# Base log data
log_data: Dict[str, Any] = {
"timestamp": datetime.utcnow().isoformat() + "Z",
"level": record.levelname,
"logger": record.name,
"message": record.getMessage(),
}
# Add correlation ID if available
correlation_id = correlation_id_var.get()
if correlation_id:
log_data["correlation_id"] = correlation_id
# | [] | yusufkaraaslan/lazy-bird | lazy_bird/core/logging.py |
"""Database connection and session management.
This module provides the SQLAlchemy engine, session factory, and
declarative base for all database models. It uses SQLAlchemy 2.0 syntax
with async support and proper connection pooling.
"""
from typing import AsyncGenerator, Generator
from sqlalchemy import create_engine, event, pool
from sqlalchemy.ext.asyncio import AsyncSession, async_sessionmaker, create_async_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import Session, sessionmaker
from lazy_bird.core.config import settings
# =============================================================================
# DECLARATIVE BASE
# =============================================================================
Base = declarative_base()
"""SQLAlchemy declarative base for all models.
All database models should inherit from this base class.
Example:
```python
from lazy_bird.core.database import Base
class Project(Base):
__tablename__ = "projects"
id = Column(Integer, primary_key=True)
```
"""
# =============================================================================
# SYNCHRONOUS DATABASE (for migrations, admin tasks)
# =============================================================================
engine = create_engine(
settings.DATABASE_URL,
pool_size=settings.DB_POOL_SIZE,
max_overflow=settings.DB_MAX_OVERFLOW,
pool_timeout=settings.DB_POOL_TIMEOUT,
pool_recycle=settings.DB_POOL_RECYCLE,
pool_pre_ping=True, # Verify connections before using
echo=settings.DB_ECHO, # Log SQL queries if enabled
future=True, # Use SQLAlchemy 2.0 style
)
"""Synchronous SQLAlchemy engine for database operations.
Uses connection pooling with the following configuration:
- Pool size: Configured via DB_POOL_SIZE
- Max overflow: Configured via DB_MAX_OVERFLOW
- Pool timeout: Configured via DB_POOL_TIMEOUT
- Pool recycle: Configured via DB_POOL_RECYCLE
- Pool pre-ping: Enabled (validates connections)
- Echo: Configured via DB_ECHO (development only)
Used for:
- Database migrations (Alembic)
- Admin scripts and CLI tools
- Background tasks (Celery)
"""
SessionLocal = sessionmaker(
bind=engine,
class_=Session,
autocommit=False,
autoflush=False,
expire_on_commit=False, # Keep objects usable after commit
future=True, # Use SQLAlchemy 2.0 style
)
"""Synchronous session factory for database transactions.
Creates new database sessions for each request/task.
Sessions should always be used with context managers.
Example:
```python
from lazy_bird.core.database import SessionLocal
# Using context manager (recommended)
with SessionLocal() as db:
projects = db.query(Project).all()
db.commit()
# Using FastAPI dependency
def get_db():
db = SessionLocal()
try:
yield db
finally:
db.close()
```
"""
# =============================================================================
# ASYNCHRONOUS DATABASE (for FastAPI endpoints)
# =============================================================================
# Convert PostgreSQL URL to async version (postgresql+asyncpg://)
async_database_url = settings.DATABASE_URL.replace(
"postgresql://", "postgresql+asyncpg://"
).replace("postgresql+psycopg2://", "postgresql+asyncpg://")
async_engine = create_async_engine(
async_database_url,
pool_size=settings.DB_POOL_SIZE,
max_overflow=settings.DB_MAX_OVERFLOW,
pool_timeout=settings.DB_POOL_TIMEOUT,
pool_recycle=settings.DB_POOL_RECYCLE,
pool_pre_ping=True,
echo=settings.DB_ECHO,
future=True,
)
"""Asynchronous SQLAlchemy engine for FastAPI endpoints.
Uses asyncpg driver for high-performance async database operations.
Configuration matches synchronous engine for consistency.
Used for:
- FastAPI endpoint handlers
- Real-time SSE streams
- Async background tasks
"""
AsyncSessionLocal = async_sessionmaker(
bind=async_engine,
class_=AsyncSession,
autocommit=False,
autoflush=False,
expire_on_commit=False,
future=True,
)
"""Asynchronous session factory for FastAPI endpoints.
Creates new async database sessions for each request.
Must be used with async context managers.
Example:
```python
from lazy_bird.core.database import AsyncSessionLocal
# Using async context manager
async with AsyncSessionLocal() as db:
result = await db.execute(select(Project))
projects = result.scalars().all()
await db.commit()
# Using FastAPI dependency
async def get_async_db():
async with AsyncSessionLocal() as db:
yield db
```
"""
# =============================================================================
# DATABASE DEPENDENCIES (for FastAPI)
# =============================================================================
def get_db() -> Generator[Session, None, None]:
"""FastAPI dependency for synchronous database sessions.
Provides a database session to FastAPI endpoints.
Automatically handles session lifecycle and cleanup.
Yields:
Session: SQLAlchemy database session
Example:
```python
from fastapi import Depends
from sqlalchemy.orm import Session
from lazy_bird.core.database import get_db
@app.get("/projects")
def list_projects(db: Session = Depends(get_db)):
projects = db.query(Project).all()
return projects
```
Note:
- Session is automatically committed on success
- Session is automatically rolled back on exception
- Session is always closed after request
"""
db = SessionLocal()
try:
yield db
db.commit()
except Exception:
db.rollback()
raise
finally:
db.close()
async def get_async_db() -> AsyncGenerator[AsyncSession, None]:
"""FastAPI dependency for asynchronous database sessions.
Provides an async database session to FastAPI endpoints.
Automatically handles session lifecycle and cleanup.
Yields:
AsyncSession: Async SQLAlchemy database session
Example:
```python
from fastapi import Depends
from sqlalchemy.ext.asyncio import AsyncSession
from lazy_bird.core.database import get_async_db
@app.get("/projects")
async def list_projects(db: AsyncSession = Depends(get_async_db)):
result = await db.execute(select(Project))
projects = result.scalars().all()
return projects
```
Note:
- Session is automatically committed on success
- Session is automatically rolled back on exception
- Session is always closed after request
"""
async with AsyncSessionLocal() as db:
try:
yield db
await db.commit()
except Exception:
await db.rollback()
raise
# =============================================================================
# DATABASE UTILITIES
# =============================================================================
def init_db() -> None:
"""Initialize database schema.
Creates all tables defined in SQLAlchemy models.
Should only be used in development or testing.
For production, use Alembic migrations instead.
Example:
```python
from lazy_bird.core.database import init_db
# Development setup
init_db() # Creates all tables
```
Warning:
This does NOT run migrations. Use `alembic upgrade head` for production.
"""
Base.metadata.create_all(bind=engine)
def drop_db() -> None:
"""Drop all database tables.
Removes all tables defined in SQLAlchemy models.
DANGEROUS - Only use in development or testing.
Example:
```python
from lazy_bird.core.database import drop_db
# Testing cleanup
drop_db() # Removes all tables
```
Warning:
This will DELETE ALL DATA. Use with extreme caution.
Never call this in production.
"""
Base.metadata.drop_all(bind=engine)
async def init_async_db() -> None:
"""Initialize database schema asynchronously.
Async version of init_db(). Creates all tables.
Should only be used in development or testing.
For production, use Alembic migrations instead.
Example:
```python
from lazy_bird.core.database import init_async_db
# Async development setup
await init_async_db()
```
Warning:
This does NOT run migrations. Use `alembic upgrade head` for production.
"""
async with async_engine.begin() as conn:
await conn.run_sync(Base.metadata.create_all)
async def drop_async_db() -> None:
"""Drop all database tables asynchronously.
Async version of drop_db(). Removes all tables.
DANGEROUS - Only use in development or testing.
Example:
```python
from lazy_bird.core.database import drop_async_db
# Async testing cleanup
await drop_async_db()
```
Warning:
This will DELETE ALL DATA. Use with extreme caution.
Never call this in production.
"""
async with async_engine.begin() as conn:
await conn.run_sync(Base.metadata.drop_all)
# =============================================================================
# CONNECTION POOL MONITORING (optional, for debugging)
# =============================================================================
@event.listens_for(pool.Pool, "connect")
def receive_connect(dbapi_conn, connection_record):
"""Event listener for new database connections.
Logs when new connections are created in the pool.
Useful for debugging connection issues.
Args:
dbapi_conn: Database API connection
connection_record: SQLAlchemy connection record
"""
if settings.DB_ECHO:
print(f"Database connection opened: {id(dbapi_conn)}")
@event.listens_for(pool.Pool, "checkout")
def receive_checkout(dbapi_conn, connection_record, connection_proxy):
"""Event listener for connection checkout from pool.
Logs when connections are checked out from the pool.
Useful for debugging connection pool exhaustion.
Args:
dbapi_conn: Database API connection
connection_record: SQLAlchemy connection record
connection_proxy: Connection proxy
"""
if settings.DB_ECHO:
print(f"Database connection checked out: {id(dbapi_conn)}")
@event.listens_for(pool.Pool, "checkin")
def receive_checkin(dbapi_conn, connection_record):
"""Event listener for connection checkin to pool.
Logs when connections are returned to the pool.
Useful for debugging connection leaks.
Args:
dbapi_conn: Database API connection
connection_record: SQLAlchemy connection record
"""
if settings.DB_ECHO:
print(f"Database connection checked in: {id(dbapi_conn)}")
# =============================================================================
# HEALTH CHECK
# =============================================================================
def check_db_connection() -> bool:
"""Check if database connection is healthy.
Attempts to execute a simple query to verify database connectivity.
Returns:
bool: True if connection is healthy, False otherwise
Example:
```python
from lazy_bird.core.database import check_db_connection
if check_db_connection():
print("Database is healthy")
else:
print("Database is down")
```
"""
try:
with engine.connect() as conn:
conn.execute("SELECT 1")
return True
except Exception as e:
if settings.DB_ECHO:
print(f"Database health check failed: {e}")
return False
async def check_async_db_connection() -> bool:
"""Check if async database connection is healthy.
Async version of check_db_connection().
Attempts to execute a simple query to verify database connectivity.
Returns:
bool: True if connection is healthy, False otherwise
Example:
```python
from lazy_bird.core.database import check_async_db_connection
if await check_async_db_connection():
print("Async database is healthy")
else:
print("Async database is down")
```
"""
try:
async with async_engine.connect() as conn:
await conn.execute("SELECT 1")
return True
except Exception as e:
if settings.DB_ECHO:
print(f"Async database health check failed: {e}")
return False
| """Database connection and session management.
This module provides the SQLAlchemy engine, session factory, and
declarative base for all database models. It uses SQLAlchemy 2.0 syntax
with async support and proper connection pooling.
"""
from typing import AsyncGenerator, Generator
from sqlalchemy import create_engine, event, pool
from sqlalchemy.ext.asyncio import AsyncSession, async_sessionmaker, create_async_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import Session, sessionmaker
from lazy_bird.core.config import settings
# =============================================================================
# DECLARATIVE BASE
# =============================================================================
Base = declarative_base()
"""SQLAlchemy declarative base for all models.
All database models should inherit from this base class.
Example:
```python
from lazy_bird.core.database import Base
class Project(Base):
__tablename__ = "projects"
id = Column(Integer, primary_key=True)
```
"""
# =============================================================================
# SYNCHRONOUS DATABASE (for migrations, admin tasks)
# =============================================================================
engine = create_engine(
settings.DATABASE_URL,
pool_size=settings.DB_POOL_SIZE,
max_overflow=settings.DB_MAX_OVERFLOW,
pool_timeout=settings.DB_POOL_TIMEOUT,
pool_recycle=settings.DB_POOL_RECYCLE,
pool_pre_ping=True, # Verify connections before using
echo=settings.DB_ECHO, # Log SQL queries if enabled
future=True, # Use SQLAlchemy 2.0 style
)
"""Synchronous SQLAlchemy engine for database operations.
Uses connection pooling with the following configuration:
- Pool size: Configured via DB_POOL_SIZE
- Max overflow: Configured via DB_MAX_OVERFLOW
- | [
"# sqlalchemy/sqlalchemy:lib/sqlalchemy/engine/create.py\ncreate_engine",
"# sqlalchemy/sqlalchemy:lib/sqlalchemy/ext/asyncio/session.py\nAsyncSession",
"# sqlalchemy/sqlalchemy:lib/sqlalchemy/ext/declarative/__init__.py\ndeclarative_base",
"# sqlalchemy/sqlalchemy:lib/sqlalchemy/orm/session.py\nSession",
"... | yusufkaraaslan/lazy-bird | lazy_bird/core/database.py |
"""Application configuration using Pydantic Settings.
This module loads configuration from environment variables (.env file)
and provides type-safe access to all application settings.
"""
from functools import lru_cache
from typing import Optional
from pydantic import Field, PostgresDsn, RedisDsn, field_validator
from pydantic_settings import BaseSettings, SettingsConfigDict
class Settings(BaseSettings):
"""Application settings loaded from environment variables.
All settings can be overridden via environment variables or .env file.
See .env.example for all available configuration options.
"""
model_config = SettingsConfigDict(
env_file=".env",
env_file_encoding="utf-8",
case_sensitive=False,
extra="ignore", # Ignore unknown env vars
)
# -------------------------------------------------------------------------
# GENERAL CONFIGURATION
# -------------------------------------------------------------------------
ENVIRONMENT: str = Field(
default="development", description="Runtime environment (development, production, testing)"
)
USE_ASYNC_DB: bool = Field(default=True, description="Use async database engine (recommended)")
HOST: str = Field(default="0.0.0.0", description="API host (alias for API_HOST)")
PORT: int = Field(default=8000, ge=1024, le=65535, description="API port (alias for API_PORT)")
# -------------------------------------------------------------------------
# DATABASE CONFIGURATION
# -------------------------------------------------------------------------
DATABASE_URL: str = Field(
default="postgresql://lazy_bird:lazy_bird@localhost:5432/lazy_bird",
description="PostgreSQL connection string",
)
DB_POOL_SIZE: int = Field(default=20, ge=5, le=100, description="Database connection pool size")
DB_MAX_OVERFLOW: int = Field(
default=10, ge=0, le=50, description="Maximum overflow connections"
)
DB_POOL_TIMEOUT: int = Field(
default=30, ge=5, le=120, description="Connection pool timeout in seconds"
)
DB_POOL_RECYCLE: int = Field(
default=3600, ge=300, le=7200, description="Recycle connections after N seconds"
)
DB_ECHO: bool = Field(
default=False, description="Echo SQL queries to console (development only)"
)
# -------------------------------------------------------------------------
# REDIS CONFIGURATION
# -------------------------------------------------------------------------
REDIS_MAX_CONNECTIONS: int = Field(
default=10, ge=1, le=100, description="Maximum Redis connection pool size"
)
REDIS_SOCKET_TIMEOUT: int = Field(
default=5, ge=1, le=30, description="Redis socket timeout in seconds"
)
REDIS_URL: str = Field(
default="redis://localhost:6379/0",
description="Redis connection string for caching and Celery",
)
# -------------------------------------------------------------------------
# CELERY CONFIGURATION
# -------------------------------------------------------------------------
CELERY_BROKER_URL: str = Field(
default="redis://localhost:6379/0", description="Celery broker URL (Redis)"
)
CELERY_RESULT_BACKEND: str = Field(
default="redis://localhost:6379/1", description="Celery result backend URL"
)
CELERY_TASK_ALWAYS_EAGER: bool = Field(
default=False, description="Execute tasks immediately in tests"
)
# -------------------------------------------------------------------------
# API CONFIGURATION
# -------------------------------------------------------------------------
API_TITLE: str = Field(default="Lazy-Bird API", description="API title shown in docs")
API_VERSION: str = Field(default="2.0.0", description="API version")
API_DESCRIPTION: str = Field(
default="Automated development workflow orchestration with Claude Code",
description="API description",
)
API_HOST: str = Field(default="0.0.0.0", description="API host to bind to")
API_PORT: int = Field(default=8000, ge=1024, le=65535, description="API port to bind to")
API_WORKERS: int = Field(default=4, ge=1, le=16, description="Number of Uvicorn workers")
API_RELOAD: bool = Field(default=False, description="Enable auto-reload (development only)")
CORS_ORIGINS: str = Field(
default="http://localhost:3000,http://localhost:5173",
description="Comma-separated list of allowed CORS origins",
)
# -------------------------------------------------------------------------
# SECURITY CONFIGURATION
# -------------------------------------------------------------------------
SECRET_KEY: str = Field(
default="your-secret-key-here-change-in-production-use-openssl-rand-hex-32",
min_length=32,
description="Secret key for JWT signing",
)
JWT_SECRET_KEY: str = Field(
default="your-secret-key-here-change-in-production-use-openssl-rand-hex-32",
min_length=32,
description="Secret key for JWT signing (alias for SECRET_KEY)",
)
API_KEY_SALT: str = Field(
default="your-api-key-salt-here-change-in-production",
min_length=16,
description="Salt for API key hashing",
)
JWT_ALGORITHM: str = Field(default="HS256", description="JWT signing algorithm")
JWT_EXPIRATION_MINUTES: int = Field(
default=15, ge=5, le=1440, description="JWT access token expiration in minutes"
)
JWT_ACCESS_TOKEN_EXPIRE_MINUTES: int = Field(
default=15, ge=5, le=1440, description="JWT access token expiration in minutes (alias)"
)
JWT_REFRESH_TOKEN_EXPIRE_MINUTES: int = Field(
default=10080, # 7 days in minutes
ge=60,
le=43200, # 30 days in minutes
description="JWT refresh token expiration in minutes",
)
PASSWORD_MIN_LENGTH: int = Field(
default=12, ge=8, le=128, description="Minimum password length for validation"
)
RATE_LIMIT_PER_MINUTE: int = Field(
default=60,
ge=1,
le=10000,
description="Maximum requests per minute per client (IP or API key)",
)
# -------------------------------------------------------------------------
# CLAUDE API CONFIGURATION
# -------------------------------------------------------------------------
CLAUDE_API_KEY: Optional[str] = Field(default=None, description="Anthropic Claude API key")
CLAUDE_MODEL: str = Field(
default="claude-sonnet-4-5-20250929", description="Claude model to use for tasks"
)
CLAUDE_MAX_TOKENS: int = Field(
default=8192, ge=1024, le=200000, description="Maximum tokens per Claude request"
)
CLAUDE_TEMPERATURE: float = Field(
default=0.7, ge=0.0, le=1.0, description="Claude temperature (0.0-1.0)"
)
# -------------------------------------------------------------------------
# TASK EXECUTION CONFIGURATION
# -------------------------------------------------------------------------
MAX_PARALLEL_TASKS: int = Field(
default=3, ge=1, le=10, description="Maximum parallel task executions"
)
TASK_TIMEOUT_SECONDS: int = Field(
default=3600, ge=300, le=7200, description="Task execution timeout in seconds"
)
WORKTREE_BASE_PATH: str = Field(
default="/tmp/lazy-bird-worktrees", description="Base path for git worktrees"
)
TASK_RETRY_MAX: int = Field(default=3, ge=0, le=5, description="Maximum task retry attempts")
TASK_RETRY_DELAY_SECONDS: int = Field(
default=60, ge=10, le=300, description="Delay between retry attempts"
)
# -------------------------------------------------------------------------
# COST TRACKING CONFIGURATION
# -------------------------------------------------------------------------
COST_PER_1K_INPUT_TOKENS: float = Field(
default=0.003, ge=0.0, description="Cost per 1K input tokens (USD)"
)
COST_PER_1K_OUTPUT_TOKENS: float = Field(
default=0.015, ge=0.0, description="Cost per 1K output tokens (USD)"
)
MAX_COST_PER_TASK: float = Field(
default=5.0, ge=0.1, le=50.0, description="Maximum cost per task (USD)"
)
DAILY_COST_LIMIT: float = Field(
default=50.0, ge=1.0, le=1000.0, description="Daily cost limit across all tasks (USD)"
)
COST_ALERT_THRESHOLD: float = Field(
default=0.8, ge=0.1, le=1.0, description="Alert when cost reaches this fraction of limit"
)
# -------------------------------------------------------------------------
# GIT CONFIGURATION
# -------------------------------------------------------------------------
GITHUB_TOKEN: Optional[str] = Field(
default=None, description="GitHub personal access token for API access"
)
GITLAB_TOKEN: Optional[str] = Field(
default=None, description="GitLab personal access token for API access"
)
GIT_USER_NAME: str = Field(default="Lazy-Bird Bot", description="Git commit author name")
GIT_USER_EMAIL: str = Field(default="bot@lazy-bird.dev", description="Git commit author email")
# -------------------------------------------------------------------------
# LOGGING CONFIGURATION
# -------------------------------------------------------------------------
LOG_LEVEL: str = Field(
default="INFO", description="Logging level (DEBUG, INFO, WARNING, ERROR, CRITICAL)"
)
LOG_FORMAT: str = Field(default="json", description="Log format (json or text)")
LOG_FILE: Optional[str] = Field(
default=None, description="Log file path (None for stdout only)"
)
# -------------------------------------------------------------------------
# WEBHOOK CONFIGURATION
# -------------------------------------------------------------------------
WEBHOOK_SECRET: Optional[str] = Field(
default=None, min_length=16, description="Webhook signature secret for validation"
)
WEBHOOK_TIMEOUT_SECONDS: int = Field(
default=30, ge=5, le=120, description="Webhook delivery timeout"
)
# -------------------------------------------------------------------------
# MONITORING CONFIGURATION
# -------------------------------------------------------------------------
ENABLE_METRICS: bool = Field(default=False, description="Enable Prometheus metrics endpoint")
METRICS_PORT: int = Field(
default=9090, ge=1024, le=65535, description="Prometheus metrics port"
)
# -------------------------------------------------------------------------
# DEVELOPMENT SETTINGS
# -------------------------------------------------------------------------
DEBUG: bool = Field(default=False, description="Enable debug mode (development only)")
TESTING: bool = Field(default=False, description="Enable testing mode")
# -------------------------------------------------------------------------
# VALIDATORS
# -------------------------------------------------------------------------
@field_validator("LOG_LEVEL")
@classmethod
def validate_log_level(cls, v: str) -> str:
"""Validate log level is valid."""
allowed = {"DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL"}
v_upper = v.upper()
if v_upper not in allowed:
raise ValueError(f"LOG_LEVEL must be one of {allowed}")
return v_upper
@field_validator("LOG_FORMAT")
@classmethod
def validate_log_format(cls, v: str) -> str:
"""Validate log format is valid."""
allowed = {"json", "text"}
v_lower = v.lower()
if v_lower not in allowed:
raise ValueError(f"LOG_FORMAT must be one of {allowed}")
return v_lower
# -------------------------------------------------------------------------
# COMPUTED PROPERTIES
# -------------------------------------------------------------------------
@property
def cors_origins_list(self) -> list[str]:
"""Get CORS origins as a list."""
return [origin.strip() for origin in self.CORS_ORIGINS.split(",")]
@property
def is_development(self) -> bool:
"""Check if running in development mode."""
return self.DEBUG or self.API_RELOAD
@property
def is_production(self) -> bool:
"""Check if running in production mode."""
return not self.is_development and not self.TESTING
@lru_cache
def get_settings() -> Settings:
"""Get cached settings instance.
Uses LRU cache to ensure settings are only loaded once.
Call this function to access settings throughout the application.
Returns:
Settings instance with all configuration loaded
Example:
```python
from lazy_bird.core.config import get_settings
settings = get_settings()
print(settings.DATABASE_URL)
```
"""
return Settings()
# Convenience: Pre-instantiated settings object for imports
settings = get_settings()
| """Application configuration using Pydantic Settings.
This module loads configuration from environment variables (.env file)
and provides type-safe access to all application settings.
"""
from functools import lru_cache
from typing import Optional
from pydantic import Field, PostgresDsn, RedisDsn, field_validator
from pydantic_settings import BaseSettings, SettingsConfigDict
class Settings(BaseSettings):
"""Application settings loaded from environment variables.
All settings can be overridden via environment variables or .env file.
See .env.example for all available configuration options.
"""
model_config = SettingsConfigDict(
env_file=".env",
env_file_encoding="utf-8",
case_sensitive=False,
extra="ignore", # Ignore unknown env vars
)
# -------------------------------------------------------------------------
# GENERAL CONFIGURATION
# -------------------------------------------------------------------------
ENVIRONMENT: str = Field(
default="development", description="Runtime environment (development, production, testing)"
)
USE_ASYNC_DB: bool = Field(default=True, description="Use async database engine (recommended)")
HOST: str = Field(default="0.0.0.0", description="API host (alias for API_HOST)")
PORT: int = Field(default=8000, ge=1024, le=65535, description="API port (alias for API_PORT)")
# -------------------------------------------------------------------------
# DATABASE CONFIGURATION
# -------------------------------------------------------------------------
DATABASE_URL: str = Field(
default="postgresql://lazy_bird:lazy_bird@localhost:5432/lazy_bird",
description="PostgreSQL connection string",
)
DB_POOL_SIZE: int = Field(default=20, | [
"# pydantic/pydantic:pydantic/v1/fields.py\nField",
"# pydantic/pydantic-settings:pydantic_settings/main.py\nBaseSettings",
"# pydantic/pydantic:pydantic/functional_validators.py\nfield_validator"
] | yusufkaraaslan/lazy-bird | lazy_bird/core/config.py |
"""Core infrastructure modules for Lazy-Bird.
This package contains the foundational components used throughout the application:
- config: Application settings and configuration management
- database: SQLAlchemy database connection and session management
- logging: Structured logging with correlation IDs
- security: Authentication and authorization utilities
- redis: Redis client for caching and Celery
"""
from lazy_bird.core.config import Settings, get_settings, settings
from lazy_bird.core.database import (
AsyncSessionLocal,
Base,
SessionLocal,
async_engine,
check_async_db_connection,
check_db_connection,
drop_async_db,
drop_db,
engine,
get_async_db,
get_db,
init_async_db,
init_db,
)
from lazy_bird.core.logging import (
clear_correlation_id,
get_correlation_id,
get_logger,
log_with_context,
set_correlation_id,
setup_logging,
)
from lazy_bird.core.redis import (
RedisCache,
check_async_redis_connection,
check_redis_connection,
close_async_redis,
close_redis,
get_async_redis,
get_async_redis_client,
get_redis,
get_redis_client,
)
from lazy_bird.core.security import (
constant_time_compare,
create_access_token,
create_refresh_token,
generate_api_key,
generate_secure_random_string,
get_api_key_prefix,
hash_api_key,
hash_password,
verify_api_key,
verify_password,
verify_refresh_token,
verify_token,
)
__all__ = [
# Config
"Settings",
"get_settings",
"settings",
# Database - Base and engines
"Base",
"engine",
"async_engine",
# Database - Session factories
"SessionLocal",
"AsyncSessionLocal",
# Database - Dependencies
"get_db",
"get_async_db",
# Database - Utilities
"init_db",
"drop_db",
"init_async_db",
"drop_async_db",
# Database - Health checks
"check_db_connection",
"check_async_db_connection",
# Logging
"setup_logging",
"get_logger",
"set_correlation_id",
"clear_correlation_id",
"get_correlation_id",
"log_with_context",
# Security - API Keys
"generate_api_key",
"hash_api_key",
"verify_api_key",
"get_api_key_prefix",
# Security - Passwords
"hash_password",
"verify_password",
# Security - JWT Tokens
"create_access_token",
"verify_token",
"create_refresh_token",
"verify_refresh_token",
# Security - Utilities
"generate_secure_random_string",
"constant_time_compare",
# Redis - Clients
"get_redis",
"get_async_redis",
"get_redis_client",
"get_async_redis_client",
# Redis - Connection Management
"check_redis_connection",
"check_async_redis_connection",
"close_redis",
"close_async_redis",
# Redis - Cache
"RedisCache",
]
| """Core infrastructure modules for Lazy-Bird.
This package contains the foundational components used throughout the application:
- config: Application settings and configuration management
- database: SQLAlchemy database connection and session management
- logging: Structured logging with correlation IDs
- security: Authentication and authorization utilities
- redis: Redis client for caching and Celery
"""
from lazy_bird.core.config import Settings, get_settings, settings
from lazy_bird.core.database import (
AsyncSessionLocal,
Base,
SessionLocal,
async_engine,
check_async_db_connection,
check_db_connection,
drop_async_db,
drop_db,
engine,
get_async_db,
get_db,
init_async_db,
init_db,
)
from lazy_bird.core.logging import (
clear_correlation_id,
get_correlation_id,
get_logger,
log_with_context,
set_correlation_id,
setup_logging,
)
from lazy_bird.core.redis import (
RedisCache,
check_async_redis_connection,
check_redis_connection,
close_async_redis,
close_redis,
get_async_redis,
get_async_redis_client,
get_redis,
get_redis_client,
)
from lazy_bird.core.security import (
constant_time_compare,
create_access_token,
create_refresh_token,
generate_api_key,
generate_secure_random_string,
get_api_key_prefix,
hash_api_key,
hash_password,
| [
"# yusufkaraaslan/lazy-bird:lazy_bird/core/config.py\nSettings",
"# yusufkaraaslan/lazy-bird:lazy_bird/core/logging.py\nclear_correlation_id",
"# yusufkaraaslan/lazy-bird:lazy_bird/core/redis.py\nRedisCache",
"# yusufkaraaslan/lazy-bird:lazy_bird/core/security.py\nconstant_time_compare"
] | yusufkaraaslan/lazy-bird | lazy_bird/core/__init__.py |
#!/usr/bin/env python3
"""
Lazy_Bird CLI - Main command-line interface
Provides the main entry point for the lazy-bird command.
"""
import sys
import subprocess
import argparse
from pathlib import Path
from typing import List, Optional
from lazy_bird import __version__, PACKAGE_ROOT
def print_banner():
"""Print the Lazy_Bird ASCII banner"""
banner = r"""
🦜 🦜
_ _ ________ __ __
| | / \ |___ / \ \ / /
| | / _ \ / / \ \_/ /
| |___ / ___ \ / /__ \ /
|_____| /_/ \_\ /_____| |_|
____ ___ ____ ____
| __ ) |_ _| | _ \ | _ \
| _ \ | | | |_) | | | | |
| |_) | | | | _ < | |_| |
|____/ |___| |_| \_\ |____/
💤 💤
Version: {version}
Automate ANY development project while you sleep 🦜💤
""".format(version=__version__)
print(banner)
def run_wizard(args: List[str]) -> int:
"""Run the setup wizard"""
wizard_script = PACKAGE_ROOT / "wizard.sh"
if not wizard_script.exists():
print(f"Error: Wizard script not found at {wizard_script}")
return 1
cmd = ["bash", str(wizard_script)] + args
return subprocess.call(cmd)
def run_server(port: int = 5000, host: str = "127.0.0.1") -> int:
"""Run the web backend server"""
try:
# Import here to avoid import errors if Flask is not installed
sys.path.insert(0, str(PACKAGE_ROOT / "web" / "backend"))
from app import app
print(f"🚀 Starting Lazy_Bird web server on http://{host}:{port}")
print(f"📊 Dashboard: http://{host}:{port}")
print(f"📡 API: http://{host}:{port}/api")
print()
app.run(host=host, port=port, debug=False)
return 0
except ImportError as e:
print(f"Error: Failed to import Flask application: {e}")
print("Install web dependencies: pip install lazy-bird[web]")
return 1
except Exception as e:
print(f"Error starting server: {e}")
return 1
def run_godot_server(args: List[str]) -> int:
"""Run the Godot test server"""
script = PACKAGE_ROOT / "scripts" / "godot-server.py"
if not script.exists():
print(f"Error: Godot server script not found at {script}")
return 1
cmd = [sys.executable, str(script)] + args
return subprocess.call(cmd)
def run_issue_watcher(args: List[str]) -> int:
"""Run the issue watcher"""
script = PACKAGE_ROOT / "scripts" / "issue-watcher.py"
if not script.exists():
print(f"Error: Issue watcher script not found at {script}")
return 1
cmd = [sys.executable, str(script)] + args
return subprocess.call(cmd)
def run_project_manager(args: List[str]) -> int:
"""Run the project manager"""
script = PACKAGE_ROOT / "scripts" / "project-manager.py"
if not script.exists():
print(f"Error: Project manager script not found at {script}")
return 1
cmd = [sys.executable, str(script)] + args
return subprocess.call(cmd)
def main(argv: Optional[List[str]] = None) -> int:
"""Main CLI entry point"""
if argv is None:
argv = sys.argv[1:]
parser = argparse.ArgumentParser(
prog="lazy-bird",
description="Automate development projects with Claude Code",
epilog="For more information, visit: https://github.com/yusufkaraaslan/lazy-bird",
)
parser.add_argument("--version", action="version", version=f"lazy-bird {__version__}")
subparsers = parser.add_subparsers(dest="command", help="Available commands")
# Setup wizard
wizard_parser = subparsers.add_parser("setup", help="Run the setup wizard")
wizard_parser.add_argument("wizard_args", nargs="*", help="Arguments to pass to the wizard")
# Web server
server_parser = subparsers.add_parser("server", help="Start the web backend server")
server_parser.add_argument(
"--host", default="127.0.0.1", help="Host to bind to (default: 127.0.0.1)"
)
server_parser.add_argument(
"--port", type=int, default=5000, help="Port to bind to (default: 5000)"
)
# Godot server
godot_parser = subparsers.add_parser("godot", help="Run the Godot test server")
godot_parser.add_argument("godot_args", nargs="*", help="Arguments to pass to Godot server")
# Issue watcher
watcher_parser = subparsers.add_parser("watch", help="Run the issue watcher")
watcher_parser.add_argument(
"watcher_args", nargs="*", help="Arguments to pass to issue watcher"
)
# Project manager
project_parser = subparsers.add_parser("project", help="Manage projects")
project_parser.add_argument(
"project_args", nargs="*", help="Arguments to pass to project manager"
)
# Status command
status_parser = subparsers.add_parser("status", help="Show system status")
# Parse arguments
args = parser.parse_args(argv)
# Show banner for main command
if not args.command or args.command == "status":
print_banner()
# Execute command
if not args.command:
parser.print_help()
return 0
elif args.command == "setup":
return run_wizard(args.wizard_args)
elif args.command == "server":
return run_server(port=args.port, host=args.host)
elif args.command == "godot":
return run_godot_server(args.godot_args)
elif args.command == "watch":
return run_issue_watcher(args.watcher_args)
elif args.command == "project":
return run_project_manager(args.project_args)
elif args.command == "status":
# Run wizard status
return run_wizard(["--status"])
else:
parser.print_help()
return 1
if __name__ == "__main__":
sys.exit(main())
| #!/usr/bin/env python3
"""
Lazy_Bird CLI - Main command-line interface
Provides the main entry point for the lazy-bird command.
"""
import sys
import subprocess
import argparse
from pathlib import Path
from typing import List, Optional
from lazy_bird import __version__, PACKAGE_ROOT
def print_banner():
"""Print the Lazy_Bird ASCII banner"""
banner = r"""
🦜 🦜
_ _ ________ __ __
| | / \ |___ / \ \ / /
| | / _ \ / / \ \_/ /
| |___ / ___ \ / /__ \ /
|_____| /_/ \_\ /_____| |_|
____ ___ ____ ____
| __ ) |_ _| | _ \ | _ \
| _ \ | | | |_) | | | | |
| |_) | | | | _ < | |_| |
|____/ |___| |_| \_\ |____/
💤 💤
| [] | yusufkaraaslan/lazy-bird | lazy_bird/cli.py |
"""Webhooks API endpoints for managing webhook subscriptions.
This module provides CRUD operations for webhook subscriptions with:
- Event-driven notifications for task execution events
- HMAC signature verification for security
- Project-scoped or global subscriptions
- Automatic failure tracking and retry logic
"""
from datetime import datetime, timezone
from typing import Optional
from uuid import UUID
from fastapi import APIRouter, Depends, Query, Response, status
from sqlalchemy import cast, func, select, Text
from sqlalchemy.ext.asyncio import AsyncSession
from lazy_bird.api.dependencies import RequireAdmin, RequireRead, get_async_database
from lazy_bird.api.exceptions import ResourceNotFoundError
from lazy_bird.core.logging import get_logger
from lazy_bird.models.api_key import ApiKey
from lazy_bird.models.project import Project
from lazy_bird.models.webhook_subscription import WebhookSubscription
from lazy_bird.schemas.webhook import (
WebhookSubscriptionCreate,
WebhookSubscriptionListResponse,
WebhookSubscriptionResponse,
WebhookSubscriptionUpdate,
)
logger = get_logger(__name__)
# Create router
router = APIRouter(prefix="/webhooks", tags=["webhooks"])
@router.get("", response_model=WebhookSubscriptionListResponse)
@router.get("/", response_model=WebhookSubscriptionListResponse)
async def list_webhooks(
# Pagination
page: int = Query(1, ge=1, description="Page number (1-indexed)"),
page_size: int = Query(20, ge=1, le=100, description="Items per page"),
# Filtering
project_id: Optional[UUID] = Query(
None, description="Filter by project ID (omit for global subscriptions)"
),
is_active: Optional[bool] = Query(None, description="Filter by active status"),
event: Optional[str] = Query(None, description="Filter by event type (e.g., task.completed)"),
# Dependencies
db: AsyncSession = Depends(get_async_database),
api_key: ApiKey = Depends(RequireRead),
) -> WebhookSubscriptionListResponse:
"""List all webhook subscriptions with pagination and filtering.
**Pagination:**
- Offset-based pagination with page and page_size
- Default: page=1, page_size=20
- Max page_size: 100
**Filtering:**
- `project_id`: UUID - Filter by project (NULL for global subscriptions)
- `is_active`: boolean - Filter by active status
- `event`: string - Filter by subscribed event type
**Returns:**
- List of webhook subscriptions
- Total count, page info, pagination metadata
**Authentication:**
- Requires: API key with 'read', 'write', or 'admin' scope
**Event Types:**
- task.queued - Task added to queue
- task.started - Task execution started
- task.completed - Task completed successfully
- task.failed - Task failed
- task.cancelled - Task cancelled
- task.timeout - Task timed out
- pr.created - Pull request created
- pr.merged - Pull request merged
- test.passed - Tests passed
- test.failed - Tests failed
"""
# Build base query
query = select(WebhookSubscription)
# Apply filters
filters = []
# Filter by project ID
if project_id is not None:
filters.append(WebhookSubscription.project_id == project_id)
# Filter by active status
if is_active is not None:
filters.append(WebhookSubscription.is_active == is_active)
# Filter by event type. Use a LIKE-based cast so the query works on both
# PostgreSQL (where events is an ARRAY stored as JSON text) and SQLite
# (where ARRAY is mapped to a JSON column during testing). The ARRAY
# .contains() method is PostgreSQL-only and silently returns no rows on
# SQLite because the dialect doesn't know how to translate it.
if event:
filters.append(cast(WebhookSubscription.events, Text).like(f'%"{event}"%'))
# Add all filters to query
if filters:
query = query.where(*filters)
# Get total count
count_query = select(func.count()).select_from(WebhookSubscription)
if filters:
count_query = count_query.where(*filters)
total_result = await db.execute(count_query)
total = total_result.scalar() or 0
# Calculate pagination
offset = (page - 1) * page_size
pages = (total + page_size - 1) // page_size if total > 0 else 0
# Apply pagination and sorting (most recent first)
query = query.order_by(WebhookSubscription.created_at.desc()).offset(offset).limit(page_size)
# Execute query
result = await db.execute(query)
subscriptions = result.scalars().all()
# Convert to response models
subscription_responses = [WebhookSubscriptionResponse.model_validate(s) for s in subscriptions]
# Log successful query
logger.info(
f"Listed {len(subscriptions)} webhook subscriptions (page {page}/{pages})",
extra={
"extra_fields": {
"total": total,
"page": page,
"page_size": page_size,
"filters": {
"project_id": str(project_id) if project_id else None,
"is_active": is_active,
"event": event,
},
}
},
)
return WebhookSubscriptionListResponse(
items=subscription_responses,
total=total,
page=page,
page_size=page_size,
pages=pages,
)
@router.post("", response_model=WebhookSubscriptionResponse, status_code=status.HTTP_201_CREATED)
@router.post("/", response_model=WebhookSubscriptionResponse, status_code=status.HTTP_201_CREATED)
async def create_webhook(
    subscription_data: WebhookSubscriptionCreate,
    db: AsyncSession = Depends(get_async_database),
    api_key: ApiKey = Depends(RequireAdmin),
) -> WebhookSubscriptionResponse:
    """Create a new webhook subscription.

    Required fields: ``url`` (http/https endpoint), ``secret`` (HMAC key,
    min 16 chars) and ``events`` (list of event types to subscribe to).
    Optional: ``project_id`` (omit for a global subscription), ``is_active``
    and ``description``.

    Deliveries are signed with ``X-Webhook-Signature: sha256=<hmac>`` using
    HMAC-SHA256 over the raw JSON body, keyed with the subscription secret.

    Returns 201 with the created subscription, 404 when ``project_id``
    references a missing project, 422 on invalid input. Requires an API key
    with 'admin' scope.
    """
    # A project-scoped subscription must point at an existing project;
    # global subscriptions (project_id is None) skip this lookup.
    if subscription_data.project_id:
        project_row = await db.execute(
            select(Project).where(Project.id == subscription_data.project_id)
        )
        if project_row.scalar_one_or_none() is None:
            raise ResourceNotFoundError(
                resource_type="Project",
                resource_id=str(subscription_data.project_id),
            )

    # Persist the new subscription. The URL is a pydantic HttpUrl and must
    # be coerced to str before it hits the database column.
    subscription = WebhookSubscription(
        url=str(subscription_data.url),
        secret=subscription_data.secret,
        project_id=subscription_data.project_id,
        events=subscription_data.events,
        is_active=subscription_data.is_active,
        description=subscription_data.description,
        failure_count=0,
        created_at=datetime.now(timezone.utc),
        updated_at=datetime.now(timezone.utc),
    )
    db.add(subscription)
    await db.commit()
    await db.refresh(subscription)

    # Audit log — the secret is deliberately never logged.
    logger.info(
        f"Created webhook subscription: {subscription.url}",
        extra={
            "extra_fields": {
                "subscription_id": str(subscription.id),
                "url": subscription.url,
                "events": subscription.events,
                "project_id": (str(subscription.project_id) if subscription.project_id else None),
            }
        },
    )

    return WebhookSubscriptionResponse.model_validate(subscription)
@router.get("/{subscription_id}", response_model=WebhookSubscriptionResponse)
async def get_webhook(
    subscription_id: UUID,
    db: AsyncSession = Depends(get_async_database),
    api_key: ApiKey = Depends(RequireRead),
) -> WebhookSubscriptionResponse:
    """Fetch a single webhook subscription by its UUID.

    Returns the subscription (including delivery statistics) or raises a
    404 when no subscription with that ID exists. Requires an API key with
    'read', 'write', or 'admin' scope.
    """
    row = await db.execute(
        select(WebhookSubscription).where(WebhookSubscription.id == subscription_id)
    )
    subscription = row.scalar_one_or_none()

    # Unknown ID -> structured 404 response.
    if not subscription:
        raise ResourceNotFoundError(
            resource_type="WebhookSubscription",
            resource_id=str(subscription_id),
        )

    logger.info(
        f"Retrieved webhook subscription: {subscription.url}",
        extra={
            "extra_fields": {
                "subscription_id": str(subscription.id),
                "url": subscription.url,
            }
        },
    )
    return WebhookSubscriptionResponse.model_validate(subscription)
@router.patch("/{subscription_id}", response_model=WebhookSubscriptionResponse)
async def update_webhook(
    subscription_id: UUID,
    update_data: WebhookSubscriptionUpdate,
    db: AsyncSession = Depends(get_async_database),
    api_key: ApiKey = Depends(RequireAdmin),
) -> WebhookSubscriptionResponse:
    """Partially update a webhook subscription.

    Any subset of ``url``, ``secret``, ``events``, ``is_active`` and
    ``description`` may be supplied; omitted fields are left untouched.
    ``project_id`` cannot be changed after creation. Set ``is_active=false``
    to pause deliveries without deleting the subscription.

    Returns the updated subscription; 404 if the ID is unknown, 422 on
    invalid values. Requires an API key with 'admin' scope.
    """
    row = await db.execute(
        select(WebhookSubscription).where(WebhookSubscription.id == subscription_id)
    )
    subscription = row.scalar_one_or_none()
    if not subscription:
        raise ResourceNotFoundError(
            resource_type="WebhookSubscription",
            resource_id=str(subscription_id),
        )

    # exclude_unset keeps this a true PATCH: only fields the caller sent.
    changes = update_data.model_dump(exclude_unset=True)
    if not changes:
        # Nothing to do — return the current state unchanged.
        logger.info(f"No fields to update for webhook subscription: {subscription.url}")
        return WebhookSubscriptionResponse.model_validate(subscription)

    for name, new_value in changes.items():
        # The url field arrives as a pydantic HttpUrl; store it as str.
        if name == "url" and new_value is not None:
            new_value = str(new_value)
        setattr(subscription, name, new_value)
    subscription.updated_at = datetime.now(timezone.utc)

    await db.commit()
    await db.refresh(subscription)

    logger.info(
        f"Updated webhook subscription: {subscription.url}",
        extra={
            "extra_fields": {
                "subscription_id": str(subscription.id),
                "updated_fields": list(changes.keys()),
            }
        },
    )
    return WebhookSubscriptionResponse.model_validate(subscription)
@router.delete("/{subscription_id}", status_code=status.HTTP_204_NO_CONTENT)
async def delete_webhook(
    subscription_id: UUID,
    db: AsyncSession = Depends(get_async_database),
    api_key: ApiKey = Depends(RequireAdmin),
) -> Response:
    """Delete a webhook subscription.

    This is a hard delete: the record is permanently removed and deliveries
    stop immediately. Consider ``is_active=false`` for temporary disabling.

    Returns 204 No Content on success, 404 when the subscription does not
    exist. Requires an API key with 'admin' scope.
    """
    # Fetch existing subscription
    query = select(WebhookSubscription).where(WebhookSubscription.id == subscription_id)
    result = await db.execute(query)
    subscription = result.scalar_one_or_none()
    if not subscription:
        raise ResourceNotFoundError(
            resource_type="WebhookSubscription",
            resource_id=str(subscription_id),
        )

    # FIX: capture the URL BEFORE delete()+commit(). After the commit the ORM
    # instance is expired and the row is gone; attribute access would trigger
    # a refresh of a deleted row, which errors under the async session.
    subscription_url = subscription.url

    await db.delete(subscription)
    await db.commit()

    # Audit log of the deletion (uses the pre-captured URL).
    logger.info(
        f"Deleted webhook subscription: {subscription_url}",
        extra={
            "extra_fields": {
                "subscription_id": str(subscription_id),
                "url": subscription_url,
            }
        },
    )
    # 204 No Content: no response body.
    return Response(status_code=status.HTTP_204_NO_CONTENT)
@router.post("/{subscription_id}/test", status_code=status.HTTP_200_OK)
async def test_webhook_delivery(
    subscription_id: UUID,
    db: AsyncSession = Depends(get_async_database),
    api_key: ApiKey = Depends(RequireAdmin),
) -> dict:
    """Send a test event to a webhook endpoint.

    Useful for verifying a subscription's configuration: the endpoint is
    called with a ``webhook.test`` payload and the delivery outcome is
    returned. A failed test delivery does not count against the
    subscription's failure tracking.

    Returns 200 with the delivery details, 404 when the subscription does
    not exist, 500 if delivery itself fails. Requires an API key with
    'admin' scope.
    """
    row = await db.execute(
        select(WebhookSubscription).where(WebhookSubscription.id == subscription_id)
    )
    subscription = row.scalar_one_or_none()
    if not subscription:
        raise ResourceNotFoundError(
            resource_type="WebhookSubscription",
            resource_id=str(subscription_id),
        )

    # Imported at call time rather than module scope — presumably to avoid a
    # circular import with the webhook service; confirm before hoisting.
    from lazy_bird.services.webhook_service import send_test_webhook as send_test

    delivery_result = await send_test(subscription, db)

    logger.info(
        f"Test webhook delivery: {subscription.url}",
        extra={
            "extra_fields": {
                "subscription_id": str(subscription.id),
                "url": subscription.url,
                "success": delivery_result["success"],
                "status_code": delivery_result.get("status_code"),
            }
        },
    )
    return delivery_result
| """Webhooks API endpoints for managing webhook subscriptions.
This module provides CRUD operations for webhook subscriptions with:
- Event-driven notifications for task execution events
- HMAC signature verification for security
- Project-scoped or global subscriptions
- Automatic failure tracking and retry logic
"""
from datetime import datetime, timezone
from typing import Optional
from uuid import UUID
from fastapi import APIRouter, Depends, Query, Response, status
from sqlalchemy import cast, func, select, Text
from sqlalchemy.ext.asyncio import AsyncSession
from lazy_bird.api.dependencies import RequireAdmin, RequireRead, get_async_database
from lazy_bird.api.exceptions import ResourceNotFoundError
from lazy_bird.core.logging import get_logger
from lazy_bird.models.api_key import ApiKey
from lazy_bird.models.project import Project
from lazy_bird.models.webhook_subscription import WebhookSubscription
from lazy_bird.schemas.webhook import (
WebhookSubscriptionCreate,
WebhookSubscriptionListResponse,
WebhookSubscriptionResponse,
WebhookSubscriptionUpdate,
)
logger = get_logger(__name__)
# Create router
router = APIRouter(prefix="/webhooks", tags=["webhooks"])
@router.get("", response_model=WebhookSubscriptionListResponse)
@router.get("/", response_model=WebhookSubscriptionListResponse)
async def list_webhooks(
# Pagination
page: int = Query(1, ge=1, description="Page number (1-indexed)"),
page_size: int = Query(20, ge=1, le=100, description="Items per page"),
# Filtering
project_id: Optional[UUID] = Query(
None, description="Filter by project ID (omit for global subscriptions)"
),
is_active: Optional[bool] = Query(None, | [
"# fastapi/fastapi:fastapi/routing.py\nIRouter(r",
"# sqlalchemy/sqlalchemy:lib/sqlalchemy/util/cython.py\ncast",
"# sqlalchemy/sqlalchemy:lib/sqlalchemy/ext/asyncio/session.py\nAsyncSession",
"# yusufkaraaslan/lazy-bird:lazy_bird/api/exceptions.py\nResourceNotFoundError",
"# yusufkaraaslan/lazy-bird:lazy_b... | yusufkaraaslan/lazy-bird | lazy_bird/api/routers/webhooks.py |
"""TaskRuns API endpoints for managing task execution records.
This module provides CRUD operations for task runs with:
- List with cursor-based pagination
- Filtering by status, project, work item
- Task execution lifecycle management
- Cancellation, retry, and log retrieval
"""
import asyncio
import json
from datetime import datetime, timezone
from typing import Optional
from uuid import UUID
from fastapi import APIRouter, Depends, Query, Response, status
from fastapi.responses import StreamingResponse
from sqlalchemy import func, or_, select
from sqlalchemy.ext.asyncio import AsyncSession
from sqlalchemy.orm import selectinload
from lazy_bird.api.dependencies import RequireRead, RequireWrite, get_async_database
from lazy_bird.api.exceptions import (
InsufficientPermissionsError,
ResourceConflictError,
ResourceNotFoundError,
)
from lazy_bird.core.logging import get_logger
from lazy_bird.core.redis import get_async_redis
from lazy_bird.models.api_key import ApiKey
from lazy_bird.models.task_run import TaskRun
from lazy_bird.models.task_run_log import TaskRunLog
from lazy_bird.schemas.task_run import (
TaskRunListResponse,
TaskRunQueue,
TaskRunResponse,
TaskRunUpdate,
)
from lazy_bird.services.log_publisher import LogPublisher
from lazy_bird.tasks.task_executor import _fire_webhook
logger = get_logger(__name__)
# Create router
router = APIRouter(prefix="/task-runs", tags=["task-runs"])
@router.get("", response_model=TaskRunListResponse)
@router.get("/", response_model=TaskRunListResponse)
async def list_task_runs(
    # Pagination
    page: int = Query(1, ge=1, description="Page number (1-indexed)"),
    page_size: int = Query(20, ge=1, le=100, description="Items per page"),
    # Filtering
    project_id: Optional[UUID] = Query(None, description="Filter by project ID"),
    status: Optional[str] = Query(
        None,
        description="Filter by status (queued, running, success, failed, cancelled, timeout)",
    ),
    work_item_id: Optional[str] = Query(None, description="Filter by work item ID"),
    task_type: Optional[str] = Query(
        None, description="Filter by task type (feature, bugfix, refactor, etc.)"
    ),
    complexity: Optional[str] = Query(
        None, description="Filter by complexity (simple, medium, complex)"
    ),
    # Search
    search: Optional[str] = Query(
        None, min_length=2, description="Search in work_item_title, work_item_id"
    ),
    # Dependencies
    db: AsyncSession = Depends(get_async_database),
    api_key: ApiKey = Depends(RequireRead),
) -> TaskRunListResponse:
    """List task runs with offset pagination, filtering, and search.

    Filters: project_id, status, work_item_id, task_type, complexity.
    Search (min 2 chars) matches work_item_title / work_item_id
    case-insensitively. A project-scoped API key is always restricted to
    its own project, overriding any ``project_id`` query parameter.
    Results are sorted newest-first. Requires 'read', 'write', or 'admin'
    scope.
    """
    # Project scoping: a project-bound API key wins over the query param.
    conditions = []
    if api_key.project_id:
        conditions.append(TaskRun.project_id == api_key.project_id)
    elif project_id:
        conditions.append(TaskRun.project_id == project_id)

    # Simple equality filters, applied only when a value was supplied.
    for column, value in (
        (TaskRun.status, status),
        (TaskRun.work_item_id, work_item_id),
        (TaskRun.task_type, task_type),
        (TaskRun.complexity, complexity),
    ):
        if value:
            conditions.append(column == value)

    # Case-insensitive substring search over title and work item ID.
    if search:
        pattern = f"%{search}%"
        conditions.append(
            or_(
                TaskRun.work_item_title.ilike(pattern),
                TaskRun.work_item_id.ilike(pattern),
            )
        )

    # Total count under the same filter set (drives the `pages` value).
    count_stmt = select(func.count()).select_from(TaskRun)
    if conditions:
        count_stmt = count_stmt.where(*conditions)
    total = (await db.execute(count_stmt)).scalar() or 0

    offset = (page - 1) * page_size
    pages = (total + page_size - 1) // page_size if total > 0 else 0

    # Page query: newest first, relationships eagerly loaded so response
    # serialization does not trigger lazy loads on the async session.
    stmt = select(TaskRun)
    if conditions:
        stmt = stmt.where(*conditions)
    stmt = (
        stmt.order_by(TaskRun.created_at.desc())
        .offset(offset)
        .limit(page_size)
        .options(
            selectinload(TaskRun.project),
            selectinload(TaskRun.claude_account),
        )
    )
    runs = (await db.execute(stmt)).scalars().all()
    items = [TaskRunResponse.model_validate(run) for run in runs]

    logger.info(
        f"Listed {len(runs)} task runs (page {page}/{pages})",
        extra={
            "extra_fields": {
                "total": total,
                "page": page,
                "page_size": page_size,
                "filters": {
                    "project_id": str(project_id) if project_id else None,
                    "status": status,
                    "work_item_id": work_item_id,
                    "search": search,
                },
            }
        },
    )

    return TaskRunListResponse(
        items=items,
        total=total,
        page=page,
        page_size=page_size,
        pages=pages,
    )
@router.post("", response_model=TaskRunResponse, status_code=status.HTTP_201_CREATED)
@router.post("/", response_model=TaskRunResponse, status_code=status.HTTP_201_CREATED)
async def queue_task_run(
    task_data: TaskRunQueue,
    db: AsyncSession = Depends(get_async_database),
    api_key: ApiKey = Depends(RequireWrite),
) -> TaskRunResponse:
    """Queue a new task run for execution.

    **Required Fields:**
    - project_id: UUID - Project to run task for
    - work_item_id: string - External work item ID (e.g., "issue-42")
    - prompt: string - Prompt to send to Claude

    **Optional Fields:**
    - claude_account_id: UUID - Specific Claude account to use
    - work_item_url, work_item_title, work_item_description
    - task_type: string (default: "feature")
    - complexity: "simple" | "medium" | "complex"
    - max_retries: int (default: 3)
    - metadata: dict - Additional task metadata

    **Validation (performed in order):**
    - Project must exist and not be deleted
    - Project automation must be enabled
    - API key scope must match the project (if key is project-scoped)
    - Claude account must exist (if provided)
    - Project must not exceed concurrent task limit
    - Daily cost limit is NOT yet enforced here (checked during execution)

    **Returns:**
    - 201 Created: Task queued successfully
    - 404 Not Found: Project or Claude account doesn't exist
    - 409 Conflict: Automation disabled or limits exceeded
    - 403 Forbidden: API key scoped to a different project
    - 422 Validation Error: Invalid input data

    **Authentication:**
    - Requires: API key with 'write' or 'admin' scope
    """
    # Validate project exists and automation enabled.
    # Local import — presumably to avoid a circular import; confirm before hoisting.
    from lazy_bird.models.project import Project

    # Soft-deleted projects (deleted_at set) are treated as nonexistent.
    project_query = (
        select(Project)
        .where(Project.id == task_data.project_id)
        .where(Project.deleted_at.is_(None))
    )
    project_result = await db.execute(project_query)
    project = project_result.scalar_one_or_none()
    if not project:
        raise ResourceNotFoundError(
            resource_type="Project",
            resource_id=str(task_data.project_id),
        )

    # Check if automation is enabled — queuing is refused while disabled.
    if not project.automation_enabled:
        raise ResourceConflictError(
            detail=f"Project '{project.name}' has automation disabled. "
            "Enable automation in project settings to queue tasks.",
            conflict_field="automation_enabled",
        )

    # Check API key scope (if project-specific key): a key bound to one
    # project may not queue work for another.
    if api_key.project_id and api_key.project_id != project.id:
        raise InsufficientPermissionsError(
            detail=f"API key is scoped to project {api_key.project_id}, "
            f"cannot queue task for project {project.id}"
        )

    # Validate Claude account exists (if provided); falls back to the
    # project's default account below when omitted.
    if task_data.claude_account_id:
        from lazy_bird.models.claude_account import ClaudeAccount

        account_query = select(ClaudeAccount).where(ClaudeAccount.id == task_data.claude_account_id)
        account_result = await db.execute(account_query)
        account = account_result.scalar_one_or_none()
        if not account:
            raise ResourceNotFoundError(
                resource_type="ClaudeAccount",
                resource_id=str(task_data.claude_account_id),
            )

    # Check concurrent task limit: queued + running tasks count against it.
    # NOTE(review): this count-then-insert is not atomic; two concurrent
    # requests could both pass the check — confirm whether that is acceptable.
    running_count_query = (
        select(func.count())
        .select_from(TaskRun)
        .where(TaskRun.project_id == project.id)
        .where(TaskRun.status.in_(["queued", "running"]))
    )
    running_count_result = await db.execute(running_count_query)
    running_count = running_count_result.scalar() or 0
    if running_count >= project.max_concurrent_tasks:
        raise ResourceConflictError(
            detail=f"Project has {running_count} tasks queued/running, "
            f"max concurrent tasks is {project.max_concurrent_tasks}. "
            "Wait for tasks to complete or increase limit.",
            conflict_field="max_concurrent_tasks",
        )

    # Check daily cost limit (optional - for future implementation)
    # This would query sum of cost_usd for tasks created today
    # For now, we'll allow queuing (validation happens during execution)

    # Create task run in 'queued' state; explicit account choice wins over
    # the project's default Claude account.
    task_run = TaskRun(
        project_id=task_data.project_id,
        claude_account_id=task_data.claude_account_id or project.claude_account_id,
        work_item_id=task_data.work_item_id,
        work_item_url=task_data.work_item_url,
        work_item_title=task_data.work_item_title,
        work_item_description=task_data.work_item_description,
        task_type=task_data.task_type,
        complexity=task_data.complexity,
        prompt=task_data.prompt,
        status="queued",
        max_retries=task_data.max_retries,
        task_metadata=task_data.metadata or {},
        created_at=datetime.now(timezone.utc),
        updated_at=datetime.now(timezone.utc),
    )
    db.add(task_run)
    await db.commit()
    await db.refresh(task_run)

    # Trigger Celery task for execution AFTER the commit so the worker can
    # see the row. Dispatch is best-effort: on failure the queue processor
    # picks the task up later.
    try:
        from lazy_bird.tasks.task_executor import execute_task

        execute_task.delay(str(task_run.id))
        logger.info(f"Triggered Celery task for {task_run.id}")
    except Exception as e:
        logger.warning(
            f"Failed to trigger Celery task: {e}. " "Task will be picked up by queue processor."
        )

    # Log successful creation
    logger.info(
        f"Queued task run: {task_run.work_item_id} for project {project.name}",
        extra={
            "extra_fields": {
                "task_run_id": str(task_run.id),
                "project_id": str(project.id),
                "work_item_id": task_run.work_item_id,
                "status": task_run.status,
            }
        },
    )
    return TaskRunResponse.model_validate(task_run)
@router.get("/{task_run_id}", response_model=TaskRunResponse)
async def get_task_run(
    task_run_id: UUID,
    db: AsyncSession = Depends(get_async_database),
    api_key: ApiKey = Depends(RequireRead),
) -> TaskRunResponse:
    """Fetch one task run by ID with full details.

    The response includes execution status and timings, resource usage
    (tokens, cost), results (PR URL, tests, errors), and the related
    project / Claude account. Raises 404 for an unknown ID and 403 when a
    project-scoped API key targets a different project's task run.
    Requires 'read', 'write', or 'admin' scope.
    """
    # Eager-load relationships so serialization never lazy-loads on the
    # async session.
    stmt = (
        select(TaskRun)
        .where(TaskRun.id == task_run_id)
        .options(
            selectinload(TaskRun.project),
            selectinload(TaskRun.claude_account),
        )
    )
    task_run = (await db.execute(stmt)).scalar_one_or_none()

    if not task_run:
        raise ResourceNotFoundError(
            resource_type="TaskRun",
            resource_id=str(task_run_id),
        )

    # Enforce project scope for project-bound API keys.
    if api_key.project_id and api_key.project_id != task_run.project_id:
        raise InsufficientPermissionsError(
            detail=f"API key is scoped to project {api_key.project_id}, "
            f"cannot access task run for project {task_run.project_id}"
        )

    logger.info(
        f"Retrieved task run: {task_run.work_item_id} ({task_run.status})",
        extra={
            "extra_fields": {
                "task_run_id": str(task_run.id),
                "project_id": str(task_run.project_id),
                "status": task_run.status,
            }
        },
    )
    return TaskRunResponse.model_validate(task_run)
@router.patch("/{task_run_id}", response_model=TaskRunResponse)
async def update_task_run(
    task_run_id: UUID,
    update_data: TaskRunUpdate,
    db: AsyncSession = Depends(get_async_database),
    api_key: ApiKey = Depends(RequireWrite),
) -> TaskRunResponse:
    """Update task run status and results (partial update).

    **Path Parameters:**
    - task_run_id: UUID - Task run identifier

    **Request Body (all optional):**
    - status: string - Execution status
    - branch_name, worktree_path, commit_sha: Git details
    - pr_url, pr_number, tests_passed, test_output: Results
    - error_message: Error details
    - tokens_used, cost_usd: Resource usage
    - metadata: Additional data

    **Returns:**
    - Updated task run

    **Errors:**
    - 404 Not Found: Task run doesn't exist
    - 403 Forbidden: API key scope restricted to different project
    - 409 Conflict: Attempt to move a terminal task to a different status
    - 422 Validation Error: Invalid field values

    **Authentication:**
    - Requires: API key with 'write' or 'admin' scope

    **Note:**
    - Typically used by task execution system to update progress
    - Manual updates should be used with caution
    """
    # Fetch existing task run
    query = select(TaskRun).where(TaskRun.id == task_run_id)
    result = await db.execute(query)
    task_run = result.scalar_one_or_none()
    # Check if task run exists
    if not task_run:
        raise ResourceNotFoundError(
            resource_type="TaskRun",
            resource_id=str(task_run_id),
        )
    # Check API key scope: project-bound keys may only touch their project.
    if api_key.project_id and api_key.project_id != task_run.project_id:
        raise InsufficientPermissionsError(
            detail=f"API key is scoped to project {api_key.project_id}, "
            f"cannot update task run for project {task_run.project_id}"
        )
    # Get update data (only fields that were provided) — exclude_unset makes
    # this a true PATCH: omitted fields are never overwritten.
    update_fields = update_data.model_dump(exclude_unset=True)
    # If no fields to update, return existing task run unchanged.
    if not update_fields:
        logger.info(f"No fields to update for task run: {task_run.work_item_id}")
        return TaskRunResponse.model_validate(task_run)
    # Validate status transition: terminal statuses are frozen — only a
    # no-op "transition" to the same terminal status is allowed.
    if "status" in update_fields:
        new_status = update_fields["status"]
        terminal_statuses = {"success", "failed", "cancelled", "timeout"}
        if task_run.status in terminal_statuses and new_status != task_run.status:
            raise ResourceConflictError(
                detail=f"Cannot transition from '{task_run.status}' to '{new_status}'. "
                f"Task in terminal status.",
                conflict_field="status",
            )
    # Apply updates to task run in place on the ORM instance.
    for field, value in update_fields.items():
        setattr(task_run, field, value)
    # Update updated_at timestamp
    task_run.updated_at = datetime.now(timezone.utc)
    # Commit changes and re-read server-side state.
    await db.commit()
    await db.refresh(task_run)
    # Log successful update
    logger.info(
        f"Updated task run: {task_run.work_item_id}",
        extra={
            "extra_fields": {
                "task_run_id": str(task_run.id),
                "updated_fields": list(update_fields.keys()),
                "status": task_run.status,
            }
        },
    )
    return TaskRunResponse.model_validate(task_run)
@router.post("/{task_run_id}/cancel", response_model=TaskRunResponse)
async def cancel_task_run(
    task_run_id: UUID,
    db: AsyncSession = Depends(get_async_database),
    api_key: ApiKey = Depends(RequireWrite),
) -> TaskRunResponse:
    """Cancel a queued or running task.

    Sets status to 'cancelled', stamps ``completed_at``, computes the
    duration when the task had started, and best-effort revokes the Celery
    task. Only 'queued' or 'running' tasks can be cancelled.

    Returns the updated task run; 404 for an unknown ID, 409 when the task
    is already in a terminal status, 403 when a project-scoped API key
    targets another project. Requires 'write' or 'admin' scope.
    """
    # Fetch existing task run
    query = select(TaskRun).where(TaskRun.id == task_run_id)
    result = await db.execute(query)
    task_run = result.scalar_one_or_none()
    if not task_run:
        raise ResourceNotFoundError(
            resource_type="TaskRun",
            resource_id=str(task_run_id),
        )

    # Enforce project scope for project-bound API keys.
    if api_key.project_id and api_key.project_id != task_run.project_id:
        raise InsufficientPermissionsError(
            detail=f"API key is scoped to project {api_key.project_id}, "
            f"cannot cancel task run for project {task_run.project_id}"
        )

    # Only in-flight tasks can be cancelled.
    if task_run.status not in ["queued", "running"]:
        raise ResourceConflictError(
            detail=f"Cannot cancel task with status '{task_run.status}'. "
            "Only queued or running tasks can be cancelled.",
            conflict_field="status",
        )

    # FIX: capture the status BEFORE overwriting it. The original logged
    # task_run.status as "previous_status" after setting it to "cancelled",
    # so the audit log always reported "cancelled" as the previous status.
    previous_status = task_run.status

    # Cancel the task.
    task_run.status = "cancelled"
    task_run.completed_at = datetime.now(timezone.utc)
    task_run.updated_at = datetime.now(timezone.utc)
    # Duration only makes sense when execution actually started.
    if task_run.started_at:
        duration = task_run.completed_at - task_run.started_at
        task_run.duration_seconds = int(duration.total_seconds())

    # Best-effort: ask Celery to terminate the worker-side task.
    try:
        from lazy_bird.tasks import app as celery_app

        celery_app.control.revoke(str(task_run.id), terminate=True)
        logger.info(f"Sent revoke signal for task {task_run.id}")
    except Exception as e:
        logger.warning(f"Failed to revoke Celery task: {e}")

    await db.commit()
    await db.refresh(task_run)

    # Fire webhook for cancelled state
    await _fire_webhook(db, task_run, "task.cancelled")

    logger.info(
        f"Cancelled task run: {task_run.work_item_id}",
        extra={
            "extra_fields": {
                "task_run_id": str(task_run.id),
                "previous_status": previous_status,
            }
        },
    )
    return TaskRunResponse.model_validate(task_run)
@router.post("/{task_run_id}/retry", response_model=TaskRunResponse)
async def retry_task_run(
    task_run_id: UUID,
    db: AsyncSession = Depends(get_async_database),
    api_key: ApiKey = Depends(RequireWrite),
) -> TaskRunResponse:
    """Retry a failed task.

    Increments ``retry_count``, resets status to 'queued', clears the error
    and timing fields, then re-dispatches execution. Only 'failed' or
    'timeout' tasks with ``retry_count < max_retries`` can be retried.

    Returns the updated task run; 404 for an unknown ID, 409 when the task
    is not retryable or the retry limit is exhausted, 403 when a
    project-scoped API key targets another project. Requires 'write' or
    'admin' scope.
    """
    # Fetch existing task run
    query = select(TaskRun).where(TaskRun.id == task_run_id)
    result = await db.execute(query)
    task_run = result.scalar_one_or_none()
    if not task_run:
        raise ResourceNotFoundError(
            resource_type="TaskRun",
            resource_id=str(task_run_id),
        )

    # Enforce project scope for project-bound API keys.
    if api_key.project_id and api_key.project_id != task_run.project_id:
        raise InsufficientPermissionsError(
            detail=f"API key is scoped to project {api_key.project_id}, "
            f"cannot retry task run for project {task_run.project_id}"
        )

    # Only terminal failure states are retryable.
    if task_run.status not in ["failed", "timeout"]:
        raise ResourceConflictError(
            detail=f"Cannot retry task with status '{task_run.status}'. "
            "Only failed or timeout tasks can be retried.",
            conflict_field="status",
        )

    # Check retry limit
    if task_run.retry_count >= task_run.max_retries:
        raise ResourceConflictError(
            detail=f"Cannot retry task: retry limit exceeded "
            f"({task_run.retry_count}/{task_run.max_retries}).",
            conflict_field="retry_count",
        )

    # Reset the run to a fresh 'queued' state.
    task_run.retry_count += 1
    task_run.status = "queued"
    task_run.started_at = None
    task_run.completed_at = None
    task_run.duration_seconds = None
    task_run.error_message = None
    task_run.updated_at = datetime.now(timezone.utc)

    # Commit BEFORE dispatching to Celery. The original dispatched first,
    # racing the worker against the uncommitted status reset — the worker
    # could pick up the task and still see the old 'failed' row. This also
    # matches the commit-then-dispatch order used by queue_task_run.
    await db.commit()
    await db.refresh(task_run)

    # Trigger Celery task for re-execution (best-effort; the queue
    # processor picks the task up if dispatch fails).
    try:
        from lazy_bird.tasks.task_executor import execute_task

        execute_task.delay(str(task_run.id))
        logger.info(f"Triggered retry Celery task for {task_run.id}")
    except Exception as e:
        logger.warning(
            f"Failed to trigger retry Celery task: {e}. "
            "Task will be picked up by queue processor."
        )

    # Fire webhook for queued state (retry)
    await _fire_webhook(db, task_run, "task.queued")

    logger.info(
        f"Retrying task run: {task_run.work_item_id} (attempt {task_run.retry_count}/{task_run.max_retries})",
        extra={
            "extra_fields": {
                "task_run_id": str(task_run.id),
                "retry_count": task_run.retry_count,
                "max_retries": task_run.max_retries,
            }
        },
    )
    return TaskRunResponse.model_validate(task_run)
@router.delete("/{task_run_id}", status_code=status.HTTP_204_NO_CONTENT)
async def delete_task_run(
    task_run_id: UUID,
    db: AsyncSession = Depends(get_async_database),
    api_key: ApiKey = Depends(RequireWrite),
) -> Response:
    """Delete a task run record.

    Hard delete: the record is permanently removed. Returns 204 No Content
    on success, 404 when the task run does not exist, 403 when a
    project-scoped API key targets another project's task run. Requires
    'write' or 'admin' scope.
    """
    query = select(TaskRun).where(TaskRun.id == task_run_id)
    result = await db.execute(query)
    task_run = result.scalar_one_or_none()
    if not task_run:
        raise ResourceNotFoundError(
            resource_type="TaskRun",
            resource_id=str(task_run_id),
        )

    # FIX: enforce API-key project scope. Every other task-run endpoint
    # performs this check; without it a project-scoped key could delete
    # task runs belonging to other projects.
    if api_key.project_id and api_key.project_id != task_run.project_id:
        raise InsufficientPermissionsError(
            detail=f"API key is scoped to project {api_key.project_id}, "
            f"cannot delete task run for project {task_run.project_id}"
        )

    # Capture identifiers for logging before the row is removed (the ORM
    # instance is expired after delete + commit).
    work_item_id = task_run.work_item_id

    await db.delete(task_run)
    await db.commit()

    # Audit log, consistent with the other mutating endpoints.
    logger.info(
        f"Deleted task run: {work_item_id}",
        extra={"extra_fields": {"task_run_id": str(task_run_id)}},
    )
    return Response(status_code=status.HTTP_204_NO_CONTENT)
@router.get("/{task_run_id}/logs")
async def get_task_run_logs(
    task_run_id: UUID,
    # Pagination
    page: int = Query(1, ge=1, description="Page number (1-indexed)"),
    page_size: int = Query(100, ge=1, le=1000, description="Items per page"),
    # Filtering
    level: Optional[str] = Query(
        None, description="Filter by log level (DEBUG, INFO, WARNING, ERROR)"
    ),
    db: AsyncSession = Depends(get_async_database),
    api_key: ApiKey = Depends(RequireRead),
) -> dict:
    """Return stored log entries for a task run, paginated and optionally
    filtered by level.

    **Path Parameters:**
    - task_run_id: UUID - Task run identifier

    **Query Parameters:**
    - page: int - Page number (default: 1)
    - page_size: int - Items per page (default: 100, max: 1000)
    - level: string - Filter by log level (DEBUG, INFO, WARNING, ERROR)

    **Returns:**
    - Paginated list of log entries (oldest first)

    **Errors:**
    - 404 Not Found: Task run doesn't exist
    - 403 Forbidden: API key scope restricted

    **Authentication:**
    - Requires: API key with 'read', 'write', or 'admin' scope

    **Note:**
    - For real-time log streaming, use GET /task-runs/:id/logs/stream (SSE)
    """
    # The task run must exist before we look at its logs.
    owner_result = await db.execute(select(TaskRun).where(TaskRun.id == task_run_id))
    owner = owner_result.scalar_one_or_none()
    if owner is None:
        raise ResourceNotFoundError(
            resource_type="TaskRun",
            resource_id=str(task_run_id),
        )
    # Project-scoped API keys may only read logs from their own project.
    if api_key.project_id and api_key.project_id != owner.project_id:
        raise InsufficientPermissionsError(
            detail=f"API key is scoped to project {api_key.project_id}, "
            f"cannot access logs for project {owner.project_id}"
        )
    # Shared filter set for both the count and the page query.
    filters = [TaskRunLog.task_run_id == task_run_id]
    if level:
        # Level comparison is case-insensitive on both sides.
        filters.append(func.lower(TaskRunLog.level) == level.lower())
    count_result = await db.execute(
        select(func.count()).select_from(TaskRunLog).where(*filters)
    )
    total = count_result.scalar() or 0
    page_count = (total + page_size - 1) // page_size if total > 0 else 0
    # Fetch the requested page, oldest entries first.
    page_result = await db.execute(
        select(TaskRunLog)
        .where(*filters)
        .order_by(TaskRunLog.created_at.asc())
        .offset((page - 1) * page_size)
        .limit(page_size)
    )
    entries = page_result.scalars().all()
    return {
        "items": [entry.to_dict() for entry in entries],
        "total": total,
        "page": page,
        "page_size": page_size,
        "pages": page_count,
    }
@router.get("/{task_run_id}/logs/stream")
async def stream_task_run_logs(
    task_run_id: UUID,
    level: Optional[str] = Query(
        None, description="Filter by log level (DEBUG, INFO, WARNING, ERROR)"
    ),
    search: Optional[str] = Query(
        None, description="Filter logs containing this text (case-insensitive)"
    ),
    since: Optional[datetime] = Query(
        None, description="Only show logs after this timestamp (ISO 8601 format)"
    ),
    db: AsyncSession = Depends(get_async_database),
    api_key: ApiKey = Depends(RequireRead),
):
    """Stream task run logs in real-time using Server-Sent Events (SSE).
    **Path Parameters:**
    - task_run_id: UUID - Task run identifier
    **Query Parameters:**
    - level: string - Filter by minimum log level (DEBUG < INFO < WARNING < ERROR)
    - search: string - Filter logs containing this text (case-insensitive)
    - since: datetime - Only show logs after this timestamp (ISO 8601 format)
    **Response Format (SSE):**
    ```
    event: log
    data: {"timestamp": "2025-01-02T10:30:00Z", "level": "INFO", "message": "...", "metadata": {...}}
    event: status
    data: {"status": "running", "progress": 50}
    event: error
    data: {"error": "Connection lost"}
    event: end
    data: {"message": "Task completed"}
    ```
    **Connection:**
    - Uses Server-Sent Events (text/event-stream)
    - Client reconnection handled via Last-Event-ID
    - Idle timeout: 30 seconds (keepalive pings)
    - Graceful disconnection on task completion
    **Errors:**
    - 404 Not Found: Task run doesn't exist
    - 403 Forbidden: API key scope restricted
    **Authentication:**
    - Requires: API key with 'read', 'write', or 'admin' scope
    **Usage Example (JavaScript):**
    ```javascript
    const eventSource = new EventSource('/api/task-runs/{id}/logs/stream?level=INFO');
    eventSource.addEventListener('log', (event) => {
        const logEntry = JSON.parse(event.data);
        console.log(logEntry.message);
    });
    eventSource.addEventListener('end', () => {
        eventSource.close();
    });
    ```
    """
    # Verify task run exists
    task_run_query = select(TaskRun).where(TaskRun.id == task_run_id)
    task_run_result = await db.execute(task_run_query)
    task_run = task_run_result.scalar_one_or_none()
    if not task_run:
        raise ResourceNotFoundError(
            resource_type="TaskRun",
            resource_id=str(task_run_id),
        )
    # Check API key scope
    if api_key.project_id and api_key.project_id != task_run.project_id:
        raise InsufficientPermissionsError(
            detail=f"API key is scoped to project {api_key.project_id}, "
            f"cannot access logs for project {task_run.project_id}"
        )
    # Define log level hierarchy
    LOG_LEVELS = {"DEBUG": 10, "INFO": 20, "WARNING": 30, "ERROR": 40, "CRITICAL": 50}
    # No level filter (or an unrecognized level string) falls back to 10
    # (DEBUG), i.e. every entry passes the level check.
    min_level = LOG_LEVELS.get(level.upper(), 10) if level else 10
    def should_include_log(log_entry: dict) -> bool:
        """Check if log entry passes all filters (level, search text, since).

        Closes over ``min_level``, ``search`` and ``since`` from the
        enclosing request scope. Returns True when the entry should be
        forwarded to the SSE client.
        """
        # Filter by log level
        # Entries with a missing/unknown level are treated as INFO (20).
        entry_level = LOG_LEVELS.get(log_entry.get("level", "INFO"), 20)
        if entry_level < min_level:
            return False
        # Filter by search text (case-insensitive)
        if search:
            message = log_entry.get("message", "").lower()
            if search.lower() not in message:
                return False
        # Filter by timestamp
        if since:
            timestamp_str = log_entry.get("timestamp")
            if timestamp_str:
                try:
                    # Imported lazily; if dateutil is unavailable the
                    # ImportError is swallowed by the except below and the
                    # entry is included.
                    from dateutil.parser import parse as parse_datetime
                    log_timestamp = parse_datetime(timestamp_str)
                    # Make both timezone-aware for comparison
                    if log_timestamp.tzinfo is None:
                        log_timestamp = log_timestamp.replace(tzinfo=timezone.utc)
                    if since.tzinfo is None:
                        since_aware = since.replace(tzinfo=timezone.utc)
                    else:
                        since_aware = since
                    if log_timestamp < since_aware:
                        return False
                except Exception:
                    # If timestamp parsing fails, include the log
                    pass
        return True
    async def event_generator():
        """Generate SSE events from Redis Pub/Sub and log history.

        Emits history first (oldest to newest), then a status event, then
        streams live entries from the task's pub/sub channel until a
        ``task_complete`` marker or an unrecoverable error.
        """
        redis_client = await get_async_redis()
        publisher = LogPublisher(use_async=True)
        try:
            # Determine channel based on task run
            channel = f"lazy_bird:logs:task:{task_run_id}"
            # Send initial log history
            history = await publisher.get_log_history_async(task_id=str(task_run_id), limit=100)
            for log_entry in reversed(history):  # Send oldest first
                # Apply all filters
                if should_include_log(log_entry):
                    yield f"event: log\ndata: {json.dumps(log_entry)}\n\n"
            # Send status event
            yield f'event: status\ndata: {{"status": "{task_run.status}"}}\n\n'
            # Subscribe to Redis Pub/Sub for real-time logs
            pubsub = redis_client.pubsub()
            await pubsub.subscribe(channel)
            logger.info(
                f"Started SSE stream for task run: {task_run.work_item_id}",
                extra={
                    "extra_fields": {
                        "task_run_id": str(task_run_id),
                        "channel": channel,
                        "min_level": level or "DEBUG",
                    }
                },
            )
            # Stream real-time logs
            last_keepalive = asyncio.get_event_loop().time()
            keepalive_interval = 30  # seconds
            while True:
                try:
                    # Check for new messages with timeout
                    # wait_for adds an outer 2s guard around pubsub's own
                    # 1s internal timeout, so either path yields a keepalive
                    # opportunity at least every couple of seconds.
                    message = await asyncio.wait_for(
                        pubsub.get_message(ignore_subscribe_messages=True, timeout=1.0),
                        timeout=2.0,
                    )
                    if message and message["type"] == "message":
                        # Parse log entry
                        log_entry = json.loads(message["data"])
                        # Apply all filters
                        if should_include_log(log_entry):
                            yield f"event: log\ndata: {json.dumps(log_entry)}\n\n"
                        # Check if task is complete
                        if log_entry.get("metadata", {}).get("task_complete"):
                            yield f'event: end\ndata: {{"message": "Task completed"}}\n\n'
                            break
                    # Send keepalive ping if needed
                    current_time = asyncio.get_event_loop().time()
                    if current_time - last_keepalive > keepalive_interval:
                        yield f": keepalive\n\n"
                        last_keepalive = current_time
                except asyncio.TimeoutError:
                    # No message received, send keepalive
                    yield f": keepalive\n\n"
                    last_keepalive = asyncio.get_event_loop().time()
                    continue
                except Exception as e:
                    logger.error(
                        f"Error in SSE stream: {str(e)}",
                        extra={
                            "extra_fields": {
                                "task_run_id": str(task_run_id),
                                "error": str(e),
                            }
                        },
                    )
                    yield f'event: error\ndata: {{"error": "{str(e)}"}}\n\n'
                    break
        except Exception as e:
            logger.error(
                f"Fatal error in SSE stream: {str(e)}",
                extra={
                    "extra_fields": {
                        "task_run_id": str(task_run_id),
                        "error": str(e),
                    }
                },
                exc_info=True,
            )
            yield f'event: error\ndata: {{"error": "Stream terminated: {str(e)}"}}\n\n'
        finally:
            # Cleanup
            try:
                # NOTE(review): if the stream failed before `pubsub` was
                # assigned (e.g. while fetching history), this line raises
                # NameError — deliberately swallowed by the except below.
                await pubsub.unsubscribe(channel)
                await pubsub.close()
            except Exception:
                pass
            logger.info(
                f"Closed SSE stream for task run: {task_run.work_item_id}",
                extra={
                    "extra_fields": {
                        "task_run_id": str(task_run_id),
                    }
                },
            )
    return StreamingResponse(
        event_generator(),
        media_type="text/event-stream",
        headers={
            "Cache-Control": "no-cache",
            "Connection": "keep-alive",
            "X-Accel-Buffering": "no",  # Disable nginx buffering
        },
    )
| """TaskRuns API endpoints for managing task execution records.
This module provides CRUD operations for task runs with:
- List with cursor-based pagination
- Filtering by status, project, work item
- Task execution lifecycle management
- Cancellation, retry, and log retrieval
"""
import asyncio
import json
from datetime import datetime, timezone
from typing import Optional
from uuid import UUID
from fastapi import APIRouter, Depends, Query, Response, status
from fastapi.responses import StreamingResponse
from sqlalchemy import func, or_, select
from sqlalchemy.ext.asyncio import AsyncSession
from sqlalchemy.orm import selectinload
from lazy_bird.api.dependencies import RequireRead, RequireWrite, get_async_database
from lazy_bird.api.exceptions import (
InsufficientPermissionsError,
ResourceConflictError,
ResourceNotFoundError,
)
from lazy_bird.core.logging import get_logger
from lazy_bird.core.redis import get_async_redis
from lazy_bird.models.api_key import ApiKey
from lazy_bird.models.task_run import TaskRun
from lazy_bird.models.task_run_log import TaskRunLog
from lazy_bird.schemas.task_run import (
TaskRunListResponse,
TaskRunQueue,
TaskRunResponse,
TaskRunUpdate,
)
from lazy_bird.services.log_publisher import LogPublisher
from lazy_bird.tasks.task_executor import _fire_webhook
logger = get_logger(__name__)
# Create router
router = APIRouter(prefix="/task-runs", tags=["task-runs"])
@router.get("", response_model=TaskRunListResponse)
@router.get("/", response_model=TaskRunListResponse)
async def list_task_runs(
# Pagination
page: int = Query(1, ge=1, description="Page number (1-index | [
"# fastapi/fastapi:fastapi/routing.py\nIRouter(r",
"# sqlalchemy/sqlalchemy:lib/sqlalchemy/ext/asyncio/session.py\nAsyncSession",
"# sqlalchemy/sqlalchemy:lib/sqlalchemy/orm/strategy_options.py\nselectinload",
"# yusufkaraaslan/lazy-bird:lazy_bird/api/exceptions.py\nInsufficientPermissionsError",
"# yusufka... | yusufkaraaslan/lazy-bird | lazy_bird/api/routers/task_runs.py |
"""Projects API endpoints for managing development projects.
This module provides CRUD operations for projects with:
- List with cursor-based pagination
- Filtering by automation status and project type
- Full-text search
- Project statistics
"""
from datetime import datetime, timezone
from typing import Optional
from uuid import UUID
from fastapi import APIRouter, Depends, Query, Response, status
from sqlalchemy import func, or_, select
from sqlalchemy.ext.asyncio import AsyncSession
from sqlalchemy.orm import selectinload
from lazy_bird.api.dependencies import RequireRead, RequireWrite, get_async_database
from lazy_bird.api.exceptions import ResourceConflictError, ResourceNotFoundError
from lazy_bird.core.logging import get_logger
from lazy_bird.models.api_key import ApiKey
from lazy_bird.models.project import Project
from lazy_bird.models.task_run import TaskRun
from lazy_bird.schemas.project import (
ProjectCreate,
ProjectListResponse,
ProjectResponse,
ProjectUpdate,
)
logger = get_logger(__name__)
# Create router
router = APIRouter(prefix="/projects", tags=["projects"])
@router.get("", response_model=ProjectListResponse)
@router.get("/", response_model=ProjectListResponse)
async def list_projects(
    # Pagination
    page: int = Query(1, ge=1, description="Page number (1-indexed)"),
    page_size: int = Query(20, ge=1, le=100, description="Items per page"),
    # Filtering
    automation_enabled: Optional[bool] = Query(None, description="Filter by automation status"),
    project_type: Optional[str] = Query(
        None, description="Filter by project type (e.g., 'python', 'godot')"
    ),
    include_deleted: Optional[bool] = Query(
        False, description="Include soft-deleted projects (default: false)"
    ),
    # Search
    search: Optional[str] = Query(
        None, min_length=2, description="Search in name, description, repository"
    ),
    # Dependencies
    db: AsyncSession = Depends(get_async_database),
    api_key: ApiKey = Depends(RequireRead),
) -> ProjectListResponse:
    """List all projects with pagination, filtering, and search.
    **Pagination:**
    - Offset-based pagination with page and page_size
    - Default: page=1, page_size=20
    - Max page_size: 100
    **Filtering:**
    - `automation_enabled`: true/false - Filter by automation status
    - `project_type`: string - Filter by project type
    - `include_deleted`: true/false - Include soft-deleted projects (default: false)
    **Search:**
    - `search`: string (min 2 chars) - Full-text search across name, description, repository
    **Returns:**
    - List of projects with statistics
    - Total count, page info, pagination metadata
    **Authentication:**
    - Requires: API key with 'read', 'write', or 'admin' scope
    """
    # Build base query
    query = select(Project)
    # Apply filters
    filters = []
    # Filter by API key's project scope (if project-specific key)
    if api_key.project_id:
        filters.append(Project.id == api_key.project_id)
    # Filter by automation status
    if automation_enabled is not None:
        filters.append(Project.automation_enabled == automation_enabled)
    # Filter by project type
    if project_type:
        filters.append(Project.project_type == project_type)
    # Filter by deleted status (default: exclude deleted)
    if not include_deleted:
        filters.append(Project.deleted_at.is_(None))
    # Apply search (simple LIKE search on name, repo_url, and slug)
    if search:
        search_filters = [
            Project.name.ilike(f"%{search}%"),
            Project.repo_url.ilike(f"%{search}%"),
            Project.slug.ilike(f"%{search}%"),
        ]
        filters.append(or_(*search_filters))
    # Add all filters to query
    if filters:
        query = query.where(*filters)
    # Get total count (same filter set, no pagination)
    count_query = select(func.count()).select_from(Project)
    if filters:
        count_query = count_query.where(*filters)
    total_result = await db.execute(count_query)
    total = total_result.scalar() or 0
    # Calculate pagination
    offset = (page - 1) * page_size
    pages = (total + page_size - 1) // page_size if total > 0 else 0
    # Apply pagination and sorting
    query = (
        query.order_by(Project.created_at.desc())
        .offset(offset)
        .limit(page_size)
        .options(
            selectinload(Project.framework_preset),
            selectinload(Project.task_runs),
        )
    )
    # Execute query
    result = await db.execute(query)
    projects = result.scalars().all()
    # Batch-load task statistics for every project on this page in two
    # aggregate queries instead of two queries *per project* (fixes the
    # previous N+1 query pattern; results are identical).
    project_ids = [project.id for project in projects]
    status_counts: dict = {}  # project_id -> {status: count}
    last_task_map: dict = {}  # project_id -> latest TaskRun.created_at
    if project_ids:
        stats_result = await db.execute(
            select(
                TaskRun.project_id,
                TaskRun.status,
                func.count(TaskRun.id).label("count"),
            )
            .where(TaskRun.project_id.in_(project_ids))
            .group_by(TaskRun.project_id, TaskRun.status)
        )
        for row in stats_result:
            status_counts.setdefault(row.project_id, {})[row.status] = row.count
        last_task_result = await db.execute(
            select(
                TaskRun.project_id,
                func.max(TaskRun.created_at).label("last_task_at"),
            )
            .where(TaskRun.project_id.in_(project_ids))
            .group_by(TaskRun.project_id)
        )
        last_task_map = {row.project_id: row.last_task_at for row in last_task_result}
    # Enrich projects with statistics
    project_responses = []
    for project in projects:
        stats = status_counts.get(project.id, {})
        # Create response with stats using model_validate to handle ORM attrs
        project_response = ProjectResponse.model_validate(project)
        project_response.total_tasks = sum(stats.values())
        project_response.tasks_queued = stats.get("queued", 0)
        project_response.tasks_running = stats.get("running", 0)
        project_response.tasks_success = stats.get("success", 0)
        project_response.tasks_failed = stats.get("failed", 0)
        # None when the project has no task runs (matches the previous
        # scalar_one_or_none() behavior).
        project_response.last_task_at = last_task_map.get(project.id)
        project_responses.append(project_response)
    # Log successful query
    logger.info(
        f"Listed {len(projects)} projects (page {page}/{pages})",
        extra={
            "extra_fields": {
                "total": total,
                "page": page,
                "page_size": page_size,
                "filters": {
                    "automation_enabled": automation_enabled,
                    "project_type": project_type,
                    "search": search,
                },
            }
        },
    )
    return ProjectListResponse(
        items=project_responses,
        total=total,
        page=page,
        page_size=page_size,
        pages=pages,
    )
@router.post("", response_model=ProjectResponse, status_code=status.HTTP_201_CREATED)
@router.post("/", response_model=ProjectResponse, status_code=status.HTTP_201_CREATED)
async def create_project(
    project_data: ProjectCreate,
    db: AsyncSession = Depends(get_async_database),
    api_key: ApiKey = Depends(RequireWrite),
) -> ProjectResponse:
    """Create a new project.
    **Required Fields:**
    - name: Human-readable project name
    - slug: URL-safe unique identifier (lowercase, alphanumeric, hyphens)
    - repo_url: Git repository URL
    - project_type: Project type (python, godot, etc.)
    **Optional Fields:**
    - framework_preset_id: Reference to framework preset
    - test_command, build_command, lint_command, format_command: Custom commands
    - automation_enabled: Enable automation (default: false)
    - max_concurrent_tasks: Max parallel tasks (default: 3)
    - task_timeout_seconds: Timeout in seconds (default: 1800)
    - max_cost_per_task_usd: Cost limit per task (default: 5.00)
    - daily_cost_limit_usd: Daily cost limit (default: 50.00)
    - claude_account_id: Reference to Claude account
    **Validation:**
    - Slug must be unique across all projects
    - Slug format: lowercase, alphanumeric, hyphens only
    - Framework preset ID must exist (if provided)
    - Claude account ID must exist (if provided)
    **Returns:**
    - 201 Created: Project created successfully
    - 409 Conflict: Slug already exists
    - 422 Validation Error: Invalid input data
    **Authentication:**
    - Requires: API key with 'write' or 'admin' scope
    """
    # Check slug uniqueness
    # NOTE(review): check-then-insert is not atomic; a concurrent request
    # with the same slug could still hit a DB-level unique constraint.
    slug_query = select(Project).where(Project.slug == project_data.slug)
    slug_result = await db.execute(slug_query)
    existing_project = slug_result.scalar_one_or_none()
    if existing_project:
        raise ResourceConflictError(
            detail=f"Project with slug '{project_data.slug}' already exists",
            conflict_field="slug",
        )
    # Validate framework preset exists (if provided)
    if project_data.framework_preset_id:
        # Imported locally rather than at module top — presumably to avoid
        # an import cycle; confirm before hoisting.
        from lazy_bird.models.framework_preset import FrameworkPreset
        preset_query = select(FrameworkPreset).where(
            FrameworkPreset.id == project_data.framework_preset_id
        )
        preset_result = await db.execute(preset_query)
        preset = preset_result.scalar_one_or_none()
        if not preset:
            raise ResourceNotFoundError(
                resource_type="FrameworkPreset",
                resource_id=str(project_data.framework_preset_id),
            )
    # Validate Claude account exists (if provided)
    if project_data.claude_account_id:
        # Local import for the same reason as FrameworkPreset above.
        from lazy_bird.models.claude_account import ClaudeAccount
        account_query = select(ClaudeAccount).where(
            ClaudeAccount.id == project_data.claude_account_id
        )
        account_result = await db.execute(account_query)
        account = account_result.scalar_one_or_none()
        if not account:
            raise ResourceNotFoundError(
                resource_type="ClaudeAccount",
                resource_id=str(project_data.claude_account_id),
            )
    # Create project — fields are mapped explicitly (not model_dump) so the
    # ORM model only ever receives this known set of attributes.
    project = Project(
        name=project_data.name,
        slug=project_data.slug,
        repo_url=project_data.repo_url,
        default_branch=project_data.default_branch,
        project_type=project_data.project_type,
        # Framework preset
        framework_preset_id=project_data.framework_preset_id,
        # Commands
        test_command=project_data.test_command,
        build_command=project_data.build_command,
        lint_command=project_data.lint_command,
        format_command=project_data.format_command,
        # Automation settings
        automation_enabled=project_data.automation_enabled,
        ready_state_name=project_data.ready_state_name,
        in_progress_state_name=project_data.in_progress_state_name,
        review_state_name=project_data.review_state_name,
        done_state_name=project_data.done_state_name,
        # Resource limits
        max_concurrent_tasks=project_data.max_concurrent_tasks,
        task_timeout_seconds=project_data.task_timeout_seconds,
        max_cost_per_task_usd=project_data.max_cost_per_task_usd,
        daily_cost_limit_usd=project_data.daily_cost_limit_usd,
        # Integration settings
        github_installation_id=project_data.github_installation_id,
        gitlab_project_id=project_data.gitlab_project_id,
        source_platform=project_data.source_platform,
        source_platform_url=project_data.source_platform_url,
        # Claude account
        claude_account_id=project_data.claude_account_id,
    )
    db.add(project)
    await db.commit()
    # Refresh to pick up server-generated values (id, timestamps).
    await db.refresh(project)
    # Log successful creation
    logger.info(
        f"Created project: {project.name} ({project.slug})",
        extra={
            "extra_fields": {
                "project_id": str(project.id),
                "slug": project.slug,
                "project_type": project.project_type,
                "automation_enabled": project.automation_enabled,
            }
        },
    )
    # Return response without stats (stats are empty for new project)
    return ProjectResponse.model_validate(project)
@router.get("/{project_id}", response_model=ProjectResponse)
async def get_project(
    project_id: UUID,
    db: AsyncSession = Depends(get_async_database),
    api_key: ApiKey = Depends(RequireRead),
) -> ProjectResponse:
    """Get a single project by ID with full details and statistics.

    **Path Parameters:**
    - project_id: UUID - Unique project identifier

    **Returns:**
    - Project details including related entities (framework preset, task
      runs), per-status task statistics, and the last task timestamp.

    **Errors:**
    - 404 Not Found: Project does not exist or is soft-deleted
    - 403 Forbidden: API key scope restricted to different project

    **Authentication:**
    - Requires: API key with 'read', 'write', or 'admin' scope
    """
    # Load the project with related entities eagerly; soft-deleted rows
    # are treated as nonexistent.
    load_result = await db.execute(
        select(Project)
        .where(Project.id == project_id)
        .where(Project.deleted_at.is_(None))
        .options(
            selectinload(Project.framework_preset),
            selectinload(Project.task_runs),
        )
    )
    record = load_result.scalar_one_or_none()
    if record is None:
        raise ResourceNotFoundError(
            resource_type="Project",
            resource_id=str(project_id),
        )
    # Project-scoped keys may only read their own project.
    if api_key.project_id and api_key.project_id != record.id:
        raise InsufficientPermissionsError(
            detail=f"API key is scoped to project {api_key.project_id}, cannot access project {record.id}"
        )
    # Per-status task counts in one grouped query.
    stats_rows = await db.execute(
        select(
            TaskRun.status,
            func.count(TaskRun.id).label("count"),
        )
        .where(TaskRun.project_id == record.id)
        .group_by(TaskRun.status)
    )
    per_status = {row.status: row.count for row in stats_rows}
    task_total = sum(per_status.values())
    # Timestamp of the most recent task run, if any.
    newest_result = await db.execute(
        select(TaskRun.created_at)
        .where(TaskRun.project_id == record.id)
        .order_by(TaskRun.created_at.desc())
        .limit(1)
    )
    last_task_at = newest_result.scalar_one_or_none()
    # Build the response via model_validate so ORM attributes map cleanly,
    # then attach the computed statistics.
    response = ProjectResponse.model_validate(record)
    response.total_tasks = task_total
    response.tasks_queued = per_status.get("queued", 0)
    response.tasks_running = per_status.get("running", 0)
    response.tasks_success = per_status.get("success", 0)
    response.tasks_failed = per_status.get("failed", 0)
    response.last_task_at = last_task_at
    logger.info(
        f"Retrieved project: {record.name} ({record.slug})",
        extra={
            "extra_fields": {
                "project_id": str(record.id),
                "slug": record.slug,
                "total_tasks": task_total,
            }
        },
    )
    return response
@router.patch("/{project_id}", response_model=ProjectResponse)
async def update_project(
    project_id: UUID,
    update_data: ProjectUpdate,
    db: AsyncSession = Depends(get_async_database),
    api_key: ApiKey = Depends(RequireWrite),
) -> ProjectResponse:
    """Update an existing project (partial update).
    **Path Parameters:**
    - project_id: UUID - Project identifier
    **Request Body:**
    - All fields optional (partial update)
    - Only provided fields will be updated
    - See ProjectUpdate schema for available fields
    **Validation:**
    - Slug uniqueness (if slug is changed)
    - Framework preset existence (if framework_preset_id is changed)
    - Claude account existence (if claude_account_id is changed)
    **Returns:**
    - Updated project with full details and statistics
    **Errors:**
    - 404 Not Found: Project doesn't exist or is soft-deleted
    - 409 Conflict: Slug already exists
    - 422 Validation Error: Invalid field values
    - 403 Forbidden: API key scope restricted to different project
    **Authentication:**
    - Requires: API key with 'write' or 'admin' scope
    """
    # Fetch existing project (soft-deleted projects are not updatable)
    query = select(Project).where(Project.id == project_id).where(Project.deleted_at.is_(None))
    result = await db.execute(query)
    project = result.scalar_one_or_none()
    # Check if project exists
    if not project:
        raise ResourceNotFoundError(
            resource_type="Project",
            resource_id=str(project_id),
        )
    # Check API key scope (if project-specific key)
    if api_key.project_id and api_key.project_id != project.id:
        raise InsufficientPermissionsError(
            detail=f"API key is scoped to project {api_key.project_id}, cannot update project {project.id}"
        )
    # Get update data (only fields that were provided)
    # exclude_unset=True keeps only fields the client actually sent, so an
    # omitted field is untouched while an explicitly-sent value (including
    # null) is applied.
    update_fields = update_data.model_dump(exclude_unset=True)
    # If no fields to update, return existing project
    if not update_fields:
        logger.info(
            f"No fields to update for project: {project.name} ({project.slug})",
            extra={"extra_fields": {"project_id": str(project.id)}},
        )
        return ProjectResponse.model_validate(project)
    # Validate slug uniqueness (if slug is being changed)
    # NOTE(review): like create_project, this check is not atomic with the
    # commit; a concurrent update could still violate a unique constraint.
    if "slug" in update_fields and update_fields["slug"] != project.slug:
        slug_query = select(Project).where(Project.slug == update_fields["slug"])
        slug_result = await db.execute(slug_query)
        existing_project = slug_result.scalar_one_or_none()
        if existing_project:
            raise ResourceConflictError(
                detail=f"Project with slug '{update_fields['slug']}' already exists",
                conflict_field="slug",
            )
    # Validate framework preset exists (if being changed)
    if "framework_preset_id" in update_fields and update_fields["framework_preset_id"]:
        # Local import — presumably to avoid an import cycle; confirm
        # before hoisting to module level.
        from lazy_bird.models.framework_preset import FrameworkPreset
        preset_query = select(FrameworkPreset).where(
            FrameworkPreset.id == update_fields["framework_preset_id"]
        )
        preset_result = await db.execute(preset_query)
        preset = preset_result.scalar_one_or_none()
        if not preset:
            raise ResourceNotFoundError(
                resource_type="FrameworkPreset",
                resource_id=str(update_fields["framework_preset_id"]),
            )
    # Validate Claude account exists (if being changed)
    if "claude_account_id" in update_fields and update_fields["claude_account_id"]:
        # Local import for the same reason as FrameworkPreset above.
        from lazy_bird.models.claude_account import ClaudeAccount
        account_query = select(ClaudeAccount).where(
            ClaudeAccount.id == update_fields["claude_account_id"]
        )
        account_result = await db.execute(account_query)
        account = account_result.scalar_one_or_none()
        if not account:
            raise ResourceNotFoundError(
                resource_type="ClaudeAccount",
                resource_id=str(update_fields["claude_account_id"]),
            )
    # Apply updates to project
    for field, value in update_fields.items():
        setattr(project, field, value)
    # Commit changes
    await db.commit()
    await db.refresh(project)
    # Log successful update
    logger.info(
        f"Updated project: {project.name} ({project.slug})",
        extra={
            "extra_fields": {
                "project_id": str(project.id),
                "slug": project.slug,
                "updated_fields": list(update_fields.keys()),
            }
        },
    )
    # Return updated project without stats (detail view)
    return ProjectResponse.model_validate(project)
@router.delete("/{project_id}", status_code=status.HTTP_204_NO_CONTENT)
async def delete_project(
    project_id: UUID,
    db: AsyncSession = Depends(get_async_database),
    api_key: ApiKey = Depends(RequireWrite),
) -> Response:
    """Soft delete a project (sets deleted_at timestamp).

    **Path Parameters:**
    - project_id: UUID - Project identifier

    **Behavior:**
    - Sets the deleted_at timestamp (soft delete); the record is not
      physically removed and related task runs / webhooks remain.
    - The project is subsequently excluded from list/get queries, and API
      keys scoped to it become ineffective.

    **Returns:**
    - 204 No Content: Project deleted successfully (no response body)

    **Errors:**
    - 404 Not Found: Project doesn't exist or already deleted
    - 403 Forbidden: API key scope restricted to different project

    **Authentication:**
    - Requires: API key with 'write' or 'admin' scope

    **Note:**
    - Data is preserved for recovery; permanent deletion requires database
      admin tools.
    """
    # Only active (not-yet-deleted) projects can be soft-deleted; a second
    # delete therefore yields 404.
    lookup = await db.execute(
        select(Project)
        .where(Project.id == project_id)
        .where(Project.deleted_at.is_(None))
    )
    record = lookup.scalar_one_or_none()
    if record is None:
        raise ResourceNotFoundError(
            resource_type="Project",
            resource_id=str(project_id),
        )
    # Project-scoped keys may only delete their own project.
    if api_key.project_id and api_key.project_id != record.id:
        raise InsufficientPermissionsError(
            detail=f"API key is scoped to project {api_key.project_id}, cannot delete project {record.id}"
        )
    # Soft delete: stamp and persist.
    record.deleted_at = datetime.now(timezone.utc)
    await db.commit()
    logger.info(
        f"Soft deleted project: {record.name} ({record.slug})",
        extra={
            "extra_fields": {
                "project_id": str(record.id),
                "slug": record.slug,
                "deleted_at": record.deleted_at.isoformat(),
            }
        },
    )
    # 204 No Content: success with no response body.
    return Response(status_code=status.HTTP_204_NO_CONTENT)
| """Projects API endpoints for managing development projects.
This module provides CRUD operations for projects with:
- List with cursor-based pagination
- Filtering by automation status and project type
- Full-text search
- Project statistics
"""
from datetime import datetime, timezone
from typing import Optional
from uuid import UUID
from fastapi import APIRouter, Depends, Query, Response, status
from sqlalchemy import func, or_, select
from sqlalchemy.ext.asyncio import AsyncSession
from sqlalchemy.orm import selectinload
from lazy_bird.api.dependencies import RequireRead, RequireWrite, get_async_database
from lazy_bird.api.exceptions import ResourceConflictError, ResourceNotFoundError
from lazy_bird.core.logging import get_logger
from lazy_bird.models.api_key import ApiKey
from lazy_bird.models.project import Project
from lazy_bird.models.task_run import TaskRun
from lazy_bird.schemas.project import (
ProjectCreate,
ProjectListResponse,
ProjectResponse,
ProjectUpdate,
)
logger = get_logger(__name__)
# Create router
router = APIRouter(prefix="/projects", tags=["projects"])
@router.get("", response_model=ProjectListResponse)
@router.get("/", response_model=ProjectListResponse)
async def list_projects(
# Pagination
page: int = Query(1, ge=1, description="Page number (1-indexed)"),
page_size: int = Query(20, ge=1, le=100, description="Items per page"),
# Filtering
automation_enabled: Optional[bool] = Query(None, description="Filter by automation status"),
project_type: Optional[str] = Query(
None, description="Filter by project type (e.g., 'python', 'godot')"
),
include_deleted: Optional[bool] | [
"# fastapi/fastapi:fastapi/routing.py\nIRouter(r",
"# sqlalchemy/sqlalchemy:lib/sqlalchemy/ext/asyncio/session.py\nAsyncSession",
"# sqlalchemy/sqlalchemy:lib/sqlalchemy/orm/strategy_options.py\nselectinload",
"# yusufkaraaslan/lazy-bird:lazy_bird/api/exceptions.py\nResourceConflictError",
"# yusufkaraaslan... | yusufkaraaslan/lazy-bird | lazy_bird/api/routers/projects.py |
"""Health check endpoints for system monitoring.
This module provides health check endpoints for:
- Overall system health
- Database connection status
- Redis connection status
- Celery worker status
- Disk space and memory usage
"""
import os
import shutil
from datetime import datetime
from typing import Any, Dict
import psutil
from fastapi import APIRouter, status
from fastapi.responses import JSONResponse
from lazy_bird.core.config import settings
from lazy_bird.core.database import check_async_db_connection, check_db_connection
from lazy_bird.core.logging import get_logger
from lazy_bird.core.redis import check_async_redis_connection, check_redis_connection
logger = get_logger(__name__)
# Router for all /health endpoints; mounted by the app with this prefix.
router = APIRouter(prefix="/health", tags=["health"])
async def check_database() -> Dict[str, Any]:
    """Report database connectivity.

    Runs the async or sync connection check depending on configuration
    and returns a status payload; only the host/db portion of the URL is
    exposed, never credentials.

    Returns:
        Dict[str, Any]: Database health status
    """
    try:
        use_async = settings.USE_ASYNC_DB
        if use_async:
            reachable = await check_async_db_connection()
        else:
            reachable = check_db_connection()
        # Strip anything before '@' so credentials never leak into responses.
        url = settings.DATABASE_URL
        safe_url = url.split("@")[-1] if "@" in url else "unknown"
        return {
            "status": "healthy" if reachable else "unhealthy",
            "database_url": safe_url,
            "mode": "async" if use_async else "sync",
        }
    except Exception as e:
        logger.error(f"Database health check failed: {e}")
        return {"status": "unhealthy", "error": str(e)}
async def check_redis_status() -> Dict[str, Any]:
    """Report Redis connectivity.

    Returns:
        Dict[str, Any]: Redis health status
    """
    try:
        # NOTE(review): the async/sync choice reuses USE_ASYNC_DB (a database
        # flag) for Redis as well — confirm this coupling is intentional.
        if settings.USE_ASYNC_DB:
            reachable = await check_async_redis_connection()
        else:
            reachable = check_redis_connection()
        # Strip anything before '@' so credentials never leak into responses.
        redis_url = settings.REDIS_URL
        safe_url = redis_url.split("@")[-1] if "@" in redis_url else "unknown"
        return {
            "status": "healthy" if reachable else "unhealthy",
            "redis_url": safe_url,
        }
    except Exception as e:
        logger.error(f"Redis health check failed: {e}")
        return {"status": "unhealthy", "error": str(e)}
async def check_celery_status() -> Dict[str, Any]:
    """Check Celery worker status.

    Pings the Celery control API for active workers (2 s timeout) and
    summarizes worker/task counts. Degrades gracefully: missing tasks
    module -> "not_configured"; no worker replies -> "degraded".

    Returns:
        Dict[str, Any]: Celery health status
    """
    try:
        # Import Celery app
        # Imported lazily so this endpoint still works before the tasks
        # module exists (handled by the ImportError branch below).
        from lazy_bird.tasks import celery_app
        # Get active workers
        inspect = celery_app.control.inspect(timeout=2.0)
        active_workers = inspect.active()
        if active_workers:
            # active() maps worker name -> list of its in-flight tasks.
            worker_count = len(active_workers)
            total_tasks = sum(len(tasks) for tasks in active_workers.values())
            return {
                "status": "healthy",
                "workers": worker_count,
                "active_tasks": total_tasks,
                "worker_names": list(active_workers.keys()),
            }
        else:
            # No reply within the timeout: workers may be down or starting.
            return {
                "status": "degraded",
                "workers": 0,
                "message": "No active Celery workers found",
            }
    except ImportError:
        return {
            "status": "not_configured",
            "message": "Celery tasks module not yet implemented",
        }
    except Exception as e:
        logger.error(f"Celery health check failed: {e}")
        return {
            "status": "unhealthy",
            "error": str(e),
        }
def get_system_metrics() -> Dict[str, Any]:
    """Get system resource metrics.

    Collects CPU, memory, and disk usage. The CPU sample is non-blocking:
    the previous implementation used ``psutil.cpu_percent(interval=1)``,
    which slept a full second inside the (async-served) health endpoint
    and stalled the event loop on every call. ``interval=None`` returns
    usage since the previous call instead; the very first call in a
    process may report 0.0.

    Returns:
        Dict[str, Any]: System metrics (CPU, memory, disk), or an
        ``{"error": ...}`` payload if collection fails.
    """
    try:
        # Memory usage
        memory = psutil.virtual_memory()
        # Disk usage for the volume backing the current working directory.
        disk = shutil.disk_usage(os.getcwd())
        # Non-blocking CPU sample (see docstring for the interval rationale).
        cpu_percent = psutil.cpu_percent(interval=None)
        return {
            "cpu": {
                "usage_percent": cpu_percent,
                "cores": psutil.cpu_count(),
            },
            "memory": {
                "total_mb": round(memory.total / (1024 * 1024)),
                "available_mb": round(memory.available / (1024 * 1024)),
                "used_mb": round(memory.used / (1024 * 1024)),
                "percent": memory.percent,
            },
            "disk": {
                "total_gb": round(disk.total / (1024 * 1024 * 1024)),
                "used_gb": round(disk.used / (1024 * 1024 * 1024)),
                "free_gb": round(disk.free / (1024 * 1024 * 1024)),
                "percent": round((disk.used / disk.total) * 100, 1),
            },
        }
    except Exception as e:
        logger.error(f"System metrics collection failed: {e}")
        return {
            "error": str(e),
        }
@router.get("", response_class=JSONResponse)
@router.get("/", response_class=JSONResponse)
async def health_check() -> JSONResponse:
"""Comprehensive health check endpoint.
Checks:
- Database connection
- Redis connection
- Celery workers
- System metrics
Returns:
JSONResponse: Overall health status
Status Codes:
- 200: All systems healthy
- 503: One or more systems unhealthy
"""
# Check all services
db_health = await check_database()
redis_health = await check_redis_status()
celery_health = await check_celery_status()
system_metrics = get_system_metrics()
# Determine overall status
is_healthy = (
db_health.get("status") == "healthy"
and redis_health.get("status") == "healthy"
and celery_health.get("status") in ("healthy", "degraded", "not_configured")
)
overall_status = "healthy" if is_healthy else "unhealthy"
response = {
"status": overall_status,
"timestamp": datetime.utcnow().isoformat() + "Z",
"version": settings.API_VERSION,
"environment": settings.ENVIRONMENT,
"services": {
"database": db_health,
"redis": redis_health,
"celery": celery_health,
},
"system": system_metrics,
}
# Return appropriate status code
status_code = status.HTTP_200_OK if is_healthy else status.HTTP_503_SERVICE_UNAVAILABLE
return JSONResponse(content=response, status_code=status_code)
@router.get("/live", response_class=JSONResponse)
async def liveness_probe() -> JSONResponse:
"""Kubernetes liveness probe endpoint.
Simple check that the application is running.
Returns:
JSONResponse: Basic liveness status
Status Codes:
- 200: Application is alive
"""
return JSONResponse(
content={
"status": "alive",
"timestamp": datetime.utcnow().isoformat() + "Z",
},
status_code=status.HTTP_200_OK,
)
@router.get("/ready", response_class=JSONResponse)
async def readiness_probe() -> JSONResponse:
"""Kubernetes readiness probe endpoint.
Checks if application is ready to serve traffic.
Requires database and Redis to be healthy.
Returns:
JSONResponse: Readiness status
Status Codes:
- 200: Application is ready
- 503: Application is not ready
"""
# Check critical services only (no Celery check for readiness)
db_health = await check_database()
redis_health = await check_redis_status()
is_ready = db_health.get("status") == "healthy" and redis_health.get("status") == "healthy"
response = {
"status": "ready" if is_ready else "not_ready",
"timestamp": datetime.utcnow().isoformat() + "Z",
"services": {
"database": db_health.get("status"),
"redis": redis_health.get("status"),
},
}
status_code = status.HTTP_200_OK if is_ready else status.HTTP_503_SERVICE_UNAVAILABLE
return JSONResponse(content=response, status_code=status_code)
@router.get("/startup", response_class=JSONResponse)
async def startup_probe() -> JSONResponse:
"""Kubernetes startup probe endpoint.
Checks if application has started successfully.
More lenient than readiness probe during startup.
Returns:
JSONResponse: Startup status
Status Codes:
- 200: Application has started
- 503: Application is still starting
"""
# Just check if database is reachable
db_health = await check_database()
is_started = db_health.get("status") == "healthy"
response = {
"status": "started" if is_started else "starting",
"timestamp": datetime.utcnow().isoformat() + "Z",
"database": db_health.get("status"),
}
status_code = status.HTTP_200_OK if is_started else status.HTTP_503_SERVICE_UNAVAILABLE
return JSONResponse(content=response, status_code=status_code)
| """Health check endpoints for system monitoring.
This module provides health check endpoints for:
- Overall system health
- Database connection status
- Redis connection status
- Celery worker status
- Disk space and memory usage
"""
import os
import shutil
from datetime import datetime
from typing import Any, Dict
import psutil
from fastapi import APIRouter, status
from fastapi.responses import JSONResponse
from lazy_bird.core.config import settings
from lazy_bird.core.database import check_async_db_connection, check_db_connection
from lazy_bird.core.logging import get_logger
from lazy_bird.core.redis import check_async_redis_connection, check_redis_connection
logger = get_logger(__name__)
# Create router
router = APIRouter(prefix="/health", tags=["health"])
async def check_database() -> Dict[str, Any]:
"""Check database connection status.
Returns:
Dict[str, Any]: Database health status
"""
try:
if settings.USE_ASYNC_DB:
is_healthy = await check_async_db_connection()
else:
is_healthy = check_db_connection()
return {
"status": "healthy" if is_healthy else "unhealthy",
"database_url": (
settings.DATABASE_URL.split("@")[-1] if "@" in settings.DATABASE_URL else "unknown"
),
"mode": "async" if settings.USE_ASYNC_DB else "sync",
}
| [
"# fastapi/fastapi:fastapi/routing.py\nIRouter(r",
"# yusufkaraaslan/lazy-bird:lazy_bird/core/database.py\ncheck_async_db_connection",
"# yusufkaraaslan/lazy-bird:lazy_bird/core/logging.py\nget_logger",
"# yusufkaraaslan/lazy-bird:lazy_bird/core/redis.py\ncheck_async_redis_connection"
] | yusufkaraaslan/lazy-bird | lazy_bird/api/routers/health.py |
"""FrameworkPresets API endpoints for managing framework configurations.
This module provides CRUD operations for framework presets with:
- Built-in presets (Godot, Django, React, etc.)
- Custom user-defined presets
- Test/build/lint/format command configuration
- Framework-specific metadata
"""
from datetime import datetime, timezone
from typing import Optional
from uuid import UUID
from fastapi import APIRouter, Depends, Query, Response, status
from sqlalchemy import func, select
from sqlalchemy.ext.asyncio import AsyncSession
from lazy_bird.api.dependencies import RequireAdmin, RequireRead, get_async_database
from lazy_bird.api.exceptions import ResourceConflictError, ResourceNotFoundError
from lazy_bird.core.logging import get_logger
from lazy_bird.models.api_key import ApiKey
from lazy_bird.models.framework_preset import FrameworkPreset
from lazy_bird.schemas.framework_preset import (
FrameworkPresetCreate,
FrameworkPresetListResponse,
FrameworkPresetResponse,
FrameworkPresetUpdate,
)
logger = get_logger(__name__)
# Router for all /framework-presets endpoints; mounted by the app with this prefix.
router = APIRouter(prefix="/framework-presets", tags=["framework-presets"])
@router.get("", response_model=FrameworkPresetListResponse)
@router.get("/", response_model=FrameworkPresetListResponse)
async def list_framework_presets(
# Pagination
page: int = Query(1, ge=1, description="Page number (1-indexed)"),
page_size: int = Query(20, ge=1, le=100, description="Items per page"),
# Filtering
framework_type: Optional[str] = Query(
None, description="Filter by framework type (game_engine, backend, frontend, language)"
),
language: Optional[str] = Query(None, description="Filter by programming language"),
is_builtin: Optional[bool] = Query(
None, description="Filter by built-in status (true for built-in, false for custom)"
),
# Dependencies
db: AsyncSession = Depends(get_async_database),
api_key: ApiKey = Depends(RequireRead),
) -> FrameworkPresetListResponse:
"""List all framework presets with pagination and filtering.
**Pagination:**
- Offset-based pagination with page and page_size
- Default: page=1, page_size=20
- Max page_size: 100
**Filtering:**
- `framework_type`: string - Filter by type (game_engine, backend, frontend, language)
- `language`: string - Filter by programming language
- `is_builtin`: boolean - Filter by built-in vs custom presets
**Returns:**
- List of framework presets
- Total count, page info, pagination metadata
**Authentication:**
- Requires: API key with 'read', 'write', or 'admin' scope
**Built-in Presets:**
- Godot, Unity, Unreal, Bevy (game engines)
- Django, Flask, FastAPI, Express, Rails (backend)
- React, Vue, Angular, Svelte (frontend)
- Python, Rust, Node.js, Go (languages)
"""
# Build base query
query = select(FrameworkPreset)
# Apply filters
filters = []
# Filter by framework type
if framework_type:
filters.append(FrameworkPreset.framework_type == framework_type)
# Filter by language
if language:
filters.append(FrameworkPreset.language == language)
# Filter by built-in status
if is_builtin is not None:
filters.append(FrameworkPreset.is_builtin == is_builtin)
# Add all filters to query
if filters:
query = query.where(*filters)
# Get total count
count_query = select(func.count()).select_from(FrameworkPreset)
if filters:
count_query = count_query.where(*filters)
total_result = await db.execute(count_query)
total = total_result.scalar() or 0
# Calculate pagination
offset = (page - 1) * page_size
pages = (total + page_size - 1) // page_size if total > 0 else 0
# Apply pagination and sorting (built-in first, then by name)
query = (
query.order_by(FrameworkPreset.is_builtin.desc(), FrameworkPreset.name.asc())
.offset(offset)
.limit(page_size)
)
# Execute query
result = await db.execute(query)
presets = result.scalars().all()
# Convert to response models
preset_responses = [FrameworkPresetResponse.model_validate(p) for p in presets]
# Log successful query
logger.info(
f"Listed {len(presets)} framework presets (page {page}/{pages})",
extra={
"extra_fields": {
"total": total,
"page": page,
"page_size": page_size,
"filters": {
"framework_type": framework_type,
"language": language,
"is_builtin": is_builtin,
},
}
},
)
return FrameworkPresetListResponse(
items=preset_responses,
total=total,
page=page,
page_size=page_size,
pages=pages,
)
@router.post("", response_model=FrameworkPresetResponse, status_code=status.HTTP_201_CREATED)
@router.post("/", response_model=FrameworkPresetResponse, status_code=status.HTTP_201_CREATED)
async def create_framework_preset(
preset_data: FrameworkPresetCreate,
db: AsyncSession = Depends(get_async_database),
api_key: ApiKey = Depends(RequireAdmin),
) -> FrameworkPresetResponse:
"""Create a new custom framework preset.
**Required Fields:**
- name: Internal name (lowercase, alphanumeric, hyphens)
- display_name: Human-readable name
- framework_type: Framework category
- test_command: Command to run tests
**Optional Fields:**
- description: Preset description
- language: Programming language
- build_command: Build command
- lint_command: Lint command
- format_command: Format command
- config_files: Framework-specific config file paths (JSON)
**Validation:**
- Name must be unique
- Name format: lowercase, alphanumeric, hyphens only
**Returns:**
- 201 Created: Preset created successfully
- 409 Conflict: Name already exists
- 422 Validation Error: Invalid input data
**Authentication:**
- Requires: API key with 'admin' scope
**Note:**
- Custom presets have is_builtin=false
- Built-in presets can only be created via database seeding
"""
# Check name uniqueness
name_query = select(FrameworkPreset).where(FrameworkPreset.name == preset_data.name)
name_result = await db.execute(name_query)
existing_preset = name_result.scalar_one_or_none()
if existing_preset:
raise ResourceConflictError(
detail=f"Framework preset with name '{preset_data.name}' already exists",
conflict_field="name",
)
# Create preset (custom presets have is_builtin=False)
preset = FrameworkPreset(
name=preset_data.name,
display_name=preset_data.display_name,
description=preset_data.description,
framework_type=preset_data.framework_type,
language=preset_data.language,
test_command=preset_data.test_command,
build_command=preset_data.build_command,
lint_command=preset_data.lint_command,
format_command=preset_data.format_command,
config_files=preset_data.config_files or {},
is_builtin=False, # Custom presets are never built-in
created_at=datetime.now(timezone.utc),
updated_at=datetime.now(timezone.utc),
)
db.add(preset)
await db.commit()
await db.refresh(preset)
# Log successful creation
logger.info(
f"Created framework preset: {preset.display_name} ({preset.name})",
extra={
"extra_fields": {
"preset_id": str(preset.id),
"name": preset.name,
"framework_type": preset.framework_type,
}
},
)
return FrameworkPresetResponse.model_validate(preset)
@router.get("/{preset_id}", response_model=FrameworkPresetResponse)
async def get_framework_preset(
preset_id: UUID,
db: AsyncSession = Depends(get_async_database),
api_key: ApiKey = Depends(RequireRead),
) -> FrameworkPresetResponse:
"""Get a single framework preset by ID.
**Path Parameters:**
- preset_id: UUID - Preset identifier
**Returns:**
- Preset details with all configuration
**Errors:**
- 404 Not Found: Preset doesn't exist
**Authentication:**
- Requires: API key with 'read', 'write', or 'admin' scope
"""
# Fetch preset
query = select(FrameworkPreset).where(FrameworkPreset.id == preset_id)
result = await db.execute(query)
preset = result.scalar_one_or_none()
# Check if preset exists
if not preset:
raise ResourceNotFoundError(
resource_type="FrameworkPreset",
resource_id=str(preset_id),
)
# Log successful query
logger.info(
f"Retrieved framework preset: {preset.display_name}",
extra={
"extra_fields": {
"preset_id": str(preset.id),
"name": preset.name,
}
},
)
return FrameworkPresetResponse.model_validate(preset)
@router.patch("/{preset_id}", response_model=FrameworkPresetResponse)
async def update_framework_preset(
preset_id: UUID,
update_data: FrameworkPresetUpdate,
db: AsyncSession = Depends(get_async_database),
api_key: ApiKey = Depends(RequireAdmin),
) -> FrameworkPresetResponse:
"""Update an existing framework preset (partial update).
**Path Parameters:**
- preset_id: UUID - Preset identifier
**Request Body (all optional):**
- display_name: Human-readable name
- description: Preset description
- framework_type: Framework category
- language: Programming language
- test_command: Test command
- build_command: Build command
- lint_command: Lint command
- format_command: Format command
- config_files: Config file paths
**Returns:**
- Updated preset
**Errors:**
- 404 Not Found: Preset doesn't exist
- 409 Conflict: Cannot update built-in preset
- 422 Validation Error: Invalid field values
**Authentication:**
- Requires: API key with 'admin' scope
**Note:**
- Built-in presets cannot be updated (protection against accidental modification)
- Name field cannot be changed after creation
"""
# Fetch existing preset
query = select(FrameworkPreset).where(FrameworkPreset.id == preset_id)
result = await db.execute(query)
preset = result.scalar_one_or_none()
# Check if preset exists
if not preset:
raise ResourceNotFoundError(
resource_type="FrameworkPreset",
resource_id=str(preset_id),
)
# Check if built-in (cannot update built-in presets)
if preset.is_builtin:
raise ResourceConflictError(
detail=f"Cannot update built-in preset '{preset.name}'. "
"Create a custom preset instead.",
conflict_field="is_builtin",
)
# Get update data (only fields that were provided)
update_fields = update_data.model_dump(exclude_unset=True)
# If no fields to update, return existing preset
if not update_fields:
logger.info(f"No fields to update for framework preset: {preset.name}")
return FrameworkPresetResponse.model_validate(preset)
# Apply updates
for field, value in update_fields.items():
setattr(preset, field, value)
# Update timestamp
preset.updated_at = datetime.now(timezone.utc)
# Commit changes
await db.commit()
await db.refresh(preset)
# Log successful update
logger.info(
f"Updated framework preset: {preset.display_name}",
extra={
"extra_fields": {
"preset_id": str(preset.id),
"updated_fields": list(update_fields.keys()),
}
},
)
return FrameworkPresetResponse.model_validate(preset)
@router.delete("/{preset_id}", status_code=status.HTTP_204_NO_CONTENT)
async def delete_framework_preset(
preset_id: UUID,
db: AsyncSession = Depends(get_async_database),
api_key: ApiKey = Depends(RequireAdmin),
) -> Response:
"""Delete a custom framework preset.
**Path Parameters:**
- preset_id: UUID - Preset identifier
**Behavior:**
- Hard delete (permanently removes record)
- Cascade effects:
- Projects using this preset: framework_preset_id set to NULL
**Returns:**
- 204 No Content: Preset deleted successfully
**Errors:**
- 404 Not Found: Preset doesn't exist
- 409 Conflict: Cannot delete built-in preset
**Authentication:**
- Requires: API key with 'admin' scope
**Warning:**
- Built-in presets cannot be deleted (protection)
- Projects will lose preset association
"""
# Fetch existing preset
query = select(FrameworkPreset).where(FrameworkPreset.id == preset_id)
result = await db.execute(query)
preset = result.scalar_one_or_none()
# Check if preset exists
if not preset:
raise ResourceNotFoundError(
resource_type="FrameworkPreset",
resource_id=str(preset_id),
)
# Check if built-in (cannot delete built-in presets)
if preset.is_builtin:
raise ResourceConflictError(
detail=f"Cannot delete built-in preset '{preset.name}'. "
"Built-in presets are protected.",
conflict_field="is_builtin",
)
# Delete the preset (cascade handled by database)
await db.delete(preset)
await db.commit()
# Log deletion
logger.info(
f"Deleted framework preset: {preset.display_name}",
extra={
"extra_fields": {
"preset_id": str(preset_id),
"name": preset.name,
}
},
)
# Return 204 No Content (no response body)
return Response(status_code=status.HTTP_204_NO_CONTENT)
| """FrameworkPresets API endpoints for managing framework configurations.
This module provides CRUD operations for framework presets with:
- Built-in presets (Godot, Django, React, etc.)
- Custom user-defined presets
- Test/build/lint/format command configuration
- Framework-specific metadata
"""
from datetime import datetime, timezone
from typing import Optional
from uuid import UUID
from fastapi import APIRouter, Depends, Query, Response, status
from sqlalchemy import func, select
from sqlalchemy.ext.asyncio import AsyncSession
from lazy_bird.api.dependencies import RequireAdmin, RequireRead, get_async_database
from lazy_bird.api.exceptions import ResourceConflictError, ResourceNotFoundError
from lazy_bird.core.logging import get_logger
from lazy_bird.models.api_key import ApiKey
from lazy_bird.models.framework_preset import FrameworkPreset
from lazy_bird.schemas.framework_preset import (
FrameworkPresetCreate,
FrameworkPresetListResponse,
FrameworkPresetResponse,
FrameworkPresetUpdate,
)
logger = get_logger(__name__)
# Create router
router = APIRouter(prefix="/framework-presets", tags=["framework-presets"])
@router.get("", response_model=FrameworkPresetListResponse)
@router.get("/", response_model=FrameworkPresetListResponse)
async def list_framework_presets(
# Pagination
page: int = Query(1, ge=1, description="Page number (1-indexed)"),
page_size: int = Query(20, ge=1, le=100, description="Items per page"),
# Filtering
framework_type: Optional[str] = Query(
None, description="Filter by framework type (game_engine, backend, frontend, language)"
),
language: Optional[str] = Query(None, description="Filter by programming | [
"# fastapi/fastapi:fastapi/routing.py\nIRouter(r",
"# sqlalchemy/sqlalchemy:lib/sqlalchemy/ext/asyncio/session.py\nAsyncSession",
"# yusufkaraaslan/lazy-bird:lazy_bird/api/exceptions.py\nResourceConflictError",
"# yusufkaraaslan/lazy-bird:lazy_bird/core/logging.py\nget_logger",
"# yusufkaraaslan/lazy-bird:l... | yusufkaraaslan/lazy-bird | lazy_bird/api/routers/framework_presets.py |
"""ClaudeAccounts API endpoints for managing Claude API/subscription accounts.
This module provides CRUD operations for Claude accounts with:
- Support for both API and subscription modes
- Encrypted credential storage
- Usage tracking and budget limits
- Account activation/deactivation
"""
from datetime import datetime, timezone
from typing import Optional
from uuid import UUID
from fastapi import APIRouter, Depends, Query, Response, status
from sqlalchemy import func, select
from sqlalchemy.ext.asyncio import AsyncSession
from lazy_bird.api.dependencies import RequireAdmin, RequireRead, get_async_database
from lazy_bird.api.exceptions import ResourceConflictError, ResourceNotFoundError
from lazy_bird.core.logging import get_logger
from lazy_bird.models.api_key import ApiKey
from lazy_bird.models.claude_account import ClaudeAccount
from lazy_bird.schemas.claude_account import (
ClaudeAccountCreate,
ClaudeAccountListResponse,
ClaudeAccountResponse,
ClaudeAccountUpdate,
)
logger = get_logger(__name__)
# Router for all /claude-accounts endpoints; mounted by the app with this prefix.
router = APIRouter(prefix="/claude-accounts", tags=["claude-accounts"])
@router.get("", response_model=ClaudeAccountListResponse)
@router.get("/", response_model=ClaudeAccountListResponse)
async def list_claude_accounts(
# Pagination
page: int = Query(1, ge=1, description="Page number (1-indexed)"),
page_size: int = Query(20, ge=1, le=100, description="Items per page"),
# Filtering
account_type: Optional[str] = Query(
None, description="Filter by account type (api, subscription)"
),
is_active: Optional[bool] = Query(None, description="Filter by active status"),
# Dependencies
db: AsyncSession = Depends(get_async_database),
api_key: ApiKey = Depends(RequireRead),
) -> ClaudeAccountListResponse:
"""List all Claude accounts with pagination and filtering.
**Pagination:**
- Offset-based pagination with page and page_size
- Default: page=1, page_size=20
- Max page_size: 100
**Filtering:**
- `account_type`: string - Filter by type (api, subscription)
- `is_active`: boolean - Filter by active status
**Returns:**
- List of Claude accounts (credentials masked)
- Total count, page info, pagination metadata
**Authentication:**
- Requires: API key with 'read', 'write', or 'admin' scope
**Security:**
- API keys are never returned (only safe preview)
- Session tokens are never returned
"""
# Build base query
query = select(ClaudeAccount)
# Apply filters
filters = []
# Filter by account type
if account_type:
filters.append(ClaudeAccount.account_type == account_type)
# Filter by active status
if is_active is not None:
filters.append(ClaudeAccount.is_active == is_active)
# Add all filters to query
if filters:
query = query.where(*filters)
# Get total count
count_query = select(func.count()).select_from(ClaudeAccount)
if filters:
count_query = count_query.where(*filters)
total_result = await db.execute(count_query)
total = total_result.scalar() or 0
# Calculate pagination
offset = (page - 1) * page_size
pages = (total + page_size - 1) // page_size if total > 0 else 0
# Apply pagination and sorting
query = query.order_by(ClaudeAccount.created_at.desc()).offset(offset).limit(page_size)
# Execute query
result = await db.execute(query)
accounts = result.scalars().all()
# Convert to response models (credentials automatically masked by schema)
account_responses = [ClaudeAccountResponse.model_validate(acc) for acc in accounts]
# Log successful query
logger.info(
f"Listed {len(accounts)} Claude accounts (page {page}/{pages})",
extra={
"extra_fields": {
"total": total,
"page": page,
"page_size": page_size,
"filters": {
"account_type": account_type,
"is_active": is_active,
},
}
},
)
return ClaudeAccountListResponse(
items=account_responses,
total=total,
page=page,
page_size=page_size,
pages=pages,
)
@router.post("", response_model=ClaudeAccountResponse, status_code=status.HTTP_201_CREATED)
@router.post("/", response_model=ClaudeAccountResponse, status_code=status.HTTP_201_CREATED)
async def create_claude_account(
account_data: ClaudeAccountCreate,
db: AsyncSession = Depends(get_async_database),
api_key: ApiKey = Depends(RequireAdmin),
) -> ClaudeAccountResponse:
"""Create a new Claude account.
**Required Fields:**
- name: Human-readable account name
- account_type: "api" or "subscription"
**API Mode (account_type="api"):**
- api_key: Anthropic API key (will be encrypted)
**Subscription Mode (account_type="subscription"):**
- config_directory: Path to Claude config directory
- session_token: Session token (will be encrypted, optional)
**Optional Fields:**
- model: Claude model (default: "claude-sonnet-4-5")
- max_tokens: Maximum tokens (default: 8000)
- temperature: Model temperature (default: 0.7)
- monthly_budget_usd: Monthly spending limit
- is_active: Active status (default: true)
**Returns:**
- 201 Created: Account created successfully
- 422 Validation Error: Invalid input data
**Authentication:**
- Requires: API key with 'admin' scope
**Security:**
- API keys and session tokens are encrypted before storage
- Credentials are never returned in responses
"""
# Create account (encryption handled by model)
account = ClaudeAccount(
name=account_data.name,
account_type=account_data.account_type,
# API mode
api_key=account_data.api_key, # Will be encrypted by model
# Subscription mode
config_directory=account_data.config_directory,
session_token=account_data.session_token, # Will be encrypted
# Claude settings
model=account_data.model,
max_tokens=account_data.max_tokens,
temperature=account_data.temperature,
# Usage limits
monthly_budget_usd=account_data.monthly_budget_usd,
is_active=account_data.is_active,
# Timestamps
created_at=datetime.now(timezone.utc),
updated_at=datetime.now(timezone.utc),
)
db.add(account)
await db.commit()
await db.refresh(account)
# Log successful creation (without sensitive data)
logger.info(
f"Created Claude account: {account.name} ({account.account_type})",
extra={
"extra_fields": {
"account_id": str(account.id),
"account_type": account.account_type,
"model": account.model,
}
},
)
return ClaudeAccountResponse.model_validate(account)
@router.get("/{account_id}", response_model=ClaudeAccountResponse)
async def get_claude_account(
account_id: UUID,
db: AsyncSession = Depends(get_async_database),
api_key: ApiKey = Depends(RequireRead),
) -> ClaudeAccountResponse:
"""Get a single Claude account by ID.
**Path Parameters:**
- account_id: UUID - Account identifier
**Returns:**
- Account details (credentials masked)
**Errors:**
- 404 Not Found: Account doesn't exist
**Authentication:**
- Requires: API key with 'read', 'write', or 'admin' scope
**Security:**
- Full API keys and session tokens are never returned
- Only safe previews are included in response
"""
# Fetch account
query = select(ClaudeAccount).where(ClaudeAccount.id == account_id)
result = await db.execute(query)
account = result.scalar_one_or_none()
# Check if account exists
if not account:
raise ResourceNotFoundError(
resource_type="ClaudeAccount",
resource_id=str(account_id),
)
# Log successful query
logger.info(
f"Retrieved Claude account: {account.name}",
extra={
"extra_fields": {
"account_id": str(account.id),
"account_type": account.account_type,
}
},
)
return ClaudeAccountResponse.model_validate(account)
@router.patch("/{account_id}", response_model=ClaudeAccountResponse)
async def update_claude_account(
account_id: UUID,
update_data: ClaudeAccountUpdate,
db: AsyncSession = Depends(get_async_database),
api_key: ApiKey = Depends(RequireAdmin),
) -> ClaudeAccountResponse:
"""Update an existing Claude account (partial update).
**Path Parameters:**
- account_id: UUID - Account identifier
**Request Body (all optional):**
- name: Account name
- api_key: New API key (will be encrypted)
- config_directory: Config directory path
- session_token: New session token (will be encrypted)
- model: Claude model
- max_tokens: Maximum tokens
- temperature: Model temperature
- monthly_budget_usd: Monthly spending limit
- is_active: Active status
**Returns:**
- Updated account
**Errors:**
- 404 Not Found: Account doesn't exist
- 422 Validation Error: Invalid field values
**Authentication:**
- Requires: API key with 'admin' scope
**Note:**
- account_type cannot be changed after creation
- Updating credentials (api_key, session_token) will re-encrypt them
"""
# Fetch existing account
query = select(ClaudeAccount).where(ClaudeAccount.id == account_id)
result = await db.execute(query)
account = result.scalar_one_or_none()
# Check if account exists
if not account:
raise ResourceNotFoundError(
resource_type="ClaudeAccount",
resource_id=str(account_id),
)
# Get update data (only fields that were provided)
update_fields = update_data.model_dump(exclude_unset=True)
# If no fields to update, return existing account
if not update_fields:
logger.info(f"No fields to update for Claude account: {account.name}")
return ClaudeAccountResponse.model_validate(account)
# Apply updates (encryption handled by model setters)
for field, value in update_fields.items():
# Map schema fields to model fields
if field == "api_key":
account.api_key_encrypted = value # Will be encrypted
elif field == "session_token":
account.session_token_encrypted = value # Will be encrypted
else:
setattr(account, field, value)
# Update timestamp
account.updated_at = datetime.now(timezone.utc)
# Commit changes
await db.commit()
await db.refresh(account)
# Log successful update (without sensitive data)
logger.info(
f"Updated Claude account: {account.name}",
extra={
"extra_fields": {
"account_id": str(account.id),
"updated_fields": list(update_fields.keys()),
}
},
)
return ClaudeAccountResponse.model_validate(account)
@router.delete("/{account_id}", status_code=status.HTTP_204_NO_CONTENT)
async def delete_claude_account(
account_id: UUID,
db: AsyncSession = Depends(get_async_database),
api_key: ApiKey = Depends(RequireAdmin),
) -> Response:
"""Delete a Claude account.
**Path Parameters:**
- account_id: UUID - Account identifier
**Behavior:**
- Hard delete (permanently removes record)
- Cascade effects:
- Projects using this account: claude_account_id set to NULL
- Task runs using this account: claude_account_id set to NULL
**Returns:**
- 204 No Content: Account deleted successfully
**Errors:**
- 404 Not Found: Account doesn't exist
**Authentication:**
- Requires: API key with 'admin' scope
**Warning:**
- This is a hard delete - account cannot be recovered
- Projects and task runs will lose account association
- Consider setting is_active=false instead for soft deactivation
"""
# Fetch existing account
query = select(ClaudeAccount).where(ClaudeAccount.id == account_id)
result = await db.execute(query)
account = result.scalar_one_or_none()
# Check if account exists
if not account:
raise ResourceNotFoundError(
resource_type="ClaudeAccount",
resource_id=str(account_id),
)
# Delete the account (cascade handled by database)
await db.delete(account)
await db.commit()
# Log deletion
logger.info(
f"Deleted Claude account: {account.name}",
extra={
"extra_fields": {
"account_id": str(account_id),
"account_type": account.account_type,
}
},
)
# Return 204 No Content (no response body)
return Response(status_code=status.HTTP_204_NO_CONTENT)
| """ClaudeAccounts API endpoints for managing Claude API/subscription accounts.
This module provides CRUD operations for Claude accounts with:
- Support for both API and subscription modes
- Encrypted credential storage
- Usage tracking and budget limits
- Account activation/deactivation
"""
from datetime import datetime, timezone
from typing import Optional
from uuid import UUID
from fastapi import APIRouter, Depends, Query, Response, status
from sqlalchemy import func, select
from sqlalchemy.ext.asyncio import AsyncSession
from lazy_bird.api.dependencies import RequireAdmin, RequireRead, get_async_database
from lazy_bird.api.exceptions import ResourceConflictError, ResourceNotFoundError
from lazy_bird.core.logging import get_logger
from lazy_bird.models.api_key import ApiKey
from lazy_bird.models.claude_account import ClaudeAccount
from lazy_bird.schemas.claude_account import (
ClaudeAccountCreate,
ClaudeAccountListResponse,
ClaudeAccountResponse,
ClaudeAccountUpdate,
)
logger = get_logger(__name__)
# Create router
router = APIRouter(prefix="/claude-accounts", tags=["claude-accounts"])
@router.get("", response_model=ClaudeAccountListResponse)
@router.get("/", response_model=ClaudeAccountListResponse)
async def list_claude_accounts(
# Pagination
page: int = Query(1, ge=1, description="Page number (1-indexed)"),
page_size: int = Query(20, ge=1, le=100, description="Items per page"),
# Filtering
account_type: Optional[str] = Query(
None, description="Filter by account type (api, subscription)"
),
is_active: Optional[bool] = Query(None, description="Filter by active status"),
# Dependencies
| [
"# fastapi/fastapi:fastapi/routing.py\nIRouter(r",
"# sqlalchemy/sqlalchemy:lib/sqlalchemy/ext/asyncio/session.py\nAsyncSession",
"# yusufkaraaslan/lazy-bird:lazy_bird/api/exceptions.py\nResourceConflictError",
"# yusufkaraaslan/lazy-bird:lazy_bird/core/logging.py\nget_logger",
"# yusufkaraaslan/lazy-bird:l... | yusufkaraaslan/lazy-bird | lazy_bird/api/routers/claude_accounts.py |
"""Authentication API endpoints for JWT-based auth.
This module provides endpoints for:
- User registration
- User login (credential verification, token issuance)
- Token refresh
- Logout (token blacklisting via Redis)
"""
from datetime import datetime, timezone
from typing import Optional
from uuid import UUID
from fastapi import APIRouter, Depends, status
from sqlalchemy import select
from sqlalchemy.ext.asyncio import AsyncSession
from lazy_bird.api.dependencies import get_async_database, get_token_from_header
from lazy_bird.core.config import settings
from lazy_bird.api.exceptions import AuthenticationError, ResourceConflictError
from lazy_bird.core.logging import get_logger
from lazy_bird.core.security import (
create_access_token,
create_refresh_token,
hash_password,
verify_password,
verify_refresh_token,
)
from lazy_bird.models.user import User
from lazy_bird.schemas.user import (
RefreshRequest,
TokenResponse,
UserCreate,
UserLogin,
UserResponse,
)
logger = get_logger(__name__)
# Create router
router = APIRouter(prefix="/auth", tags=["auth"])
@router.post("/register", response_model=UserResponse, status_code=status.HTTP_201_CREATED)
async def register(
user_data: UserCreate,
db: AsyncSession = Depends(get_async_database),
) -> UserResponse:
"""Register a new user.
**Request Body:**
- email: Valid email address (must be unique)
- password: Password (8-128 characters)
- display_name: Optional display name
**Returns:**
- 201 Created: User created successfully
- 409 Conflict: Email already registered
**Authentication:**
- None required (public endpoint)
"""
# Check if email already exists
existing_query = select(User).where(User.email == user_data.email)
existing_result = await db.execute(existing_query)
existing_user = existing_result.scalar_one_or_none()
if existing_user:
raise ResourceConflictError(
detail=f"Email '{user_data.email}' is already registered",
conflict_field="email",
conflict_value=user_data.email,
)
# Create user
new_user = User(
email=user_data.email,
password_hash=hash_password(user_data.password),
display_name=user_data.display_name,
role="user",
is_active=True,
created_at=datetime.now(timezone.utc),
)
db.add(new_user)
await db.commit()
await db.refresh(new_user)
logger.info(
f"User registered: {new_user.email}",
extra={
"extra_fields": {
"user_id": str(new_user.id),
"email": new_user.email,
}
},
)
return UserResponse.model_validate(new_user)
@router.post("/login", response_model=TokenResponse)
async def login(
credentials: UserLogin,
db: AsyncSession = Depends(get_async_database),
) -> TokenResponse:
"""Login with email and password.
**Request Body:**
- email: Registered email address
- password: Account password
**Returns:**
- 200 OK: TokenResponse with access_token and refresh_token
- 401 Unauthorized: Invalid credentials
**Authentication:**
- None required (public endpoint)
"""
# Find user by email
query = select(User).where(User.email == credentials.email)
result = await db.execute(query)
user = result.scalar_one_or_none()
# Verify user exists and password matches
if not user or not verify_password(credentials.password, user.password_hash):
raise AuthenticationError(detail="Invalid email or password")
# Check if user is active
if not user.is_active:
raise AuthenticationError(detail="Account is disabled")
# Create tokens
token_data = {"sub": str(user.id), "email": user.email, "role": user.role}
access_token = create_access_token(data=token_data)
refresh_token = create_refresh_token(data=token_data)
logger.info(
f"User logged in: {user.email}",
extra={
"extra_fields": {
"user_id": str(user.id),
"email": user.email,
}
},
)
return TokenResponse(
access_token=access_token,
refresh_token=refresh_token,
token_type="bearer",
)
@router.post("/refresh", response_model=TokenResponse)
async def refresh(
request: RefreshRequest,
db: AsyncSession = Depends(get_async_database),
) -> TokenResponse:
"""Refresh access token using a valid refresh token.
**Request Body:**
- refresh_token: Valid JWT refresh token
**Returns:**
- 200 OK: New TokenResponse with fresh access_token and refresh_token
- 401 Unauthorized: Invalid or expired refresh token
**Authentication:**
- None required (uses refresh token for authentication)
"""
# Verify refresh token
payload = verify_refresh_token(request.refresh_token)
if not payload:
raise AuthenticationError(detail="Invalid or expired refresh token")
# Get user ID from token
user_id_str = payload.get("sub")
if not user_id_str:
raise AuthenticationError(detail="Invalid token: missing user identifier")
# Convert string back to UUID for database query
try:
user_id = UUID(user_id_str)
except (ValueError, AttributeError):
raise AuthenticationError(detail="Invalid token: malformed user identifier")
# Verify user still exists and is active
query = select(User).where(User.id == user_id)
result = await db.execute(query)
user = result.scalar_one_or_none()
if not user:
raise AuthenticationError(detail="User not found")
if not user.is_active:
raise AuthenticationError(detail="Account is disabled")
# Create new tokens
token_data = {"sub": str(user.id), "email": user.email, "role": user.role}
access_token = create_access_token(data=token_data)
new_refresh_token = create_refresh_token(data=token_data)
logger.info(
f"Token refreshed for user: {user.email}",
extra={
"extra_fields": {
"user_id": str(user.id),
}
},
)
return TokenResponse(
access_token=access_token,
refresh_token=new_refresh_token,
token_type="bearer",
)
@router.post("/logout", status_code=status.HTTP_200_OK)
async def logout(
token: Optional[str] = Depends(get_token_from_header),
) -> dict:
"""Logout by blacklisting the current access token in Redis.
**Behavior:**
- Adds the token's JTI (or hash) to a Redis blacklist
- Token becomes invalid for future requests
- Blacklist entry expires when the token would have expired
**Returns:**
- 200 OK: Logout successful
**Authentication:**
- Optional: If token provided, it's blacklisted
"""
if token:
try:
from lazy_bird.core.security import verify_token
from lazy_bird.core.redis import get_async_redis
payload = verify_token(token)
if payload:
redis_client = await get_async_redis()
if redis_client:
import hashlib
token_hash = hashlib.sha256(token.encode()).hexdigest()
# Blacklist for remaining token lifetime (default 15 min)
ttl = settings.JWT_ACCESS_TOKEN_EXPIRE_MINUTES * 60
await redis_client.setex(f"blacklist:{token_hash}", ttl, "1")
logger.info("Token blacklisted on logout")
except Exception as e:
logger.warning(f"Token blacklisting failed (non-critical): {e}")
return {"message": "Logged out successfully."}
| """Authentication API endpoints for JWT-based auth.
This module provides endpoints for:
- User registration
- User login (credential verification, token issuance)
- Token refresh
- Logout (token blacklisting via Redis)
"""
from datetime import datetime, timezone
from typing import Optional
from uuid import UUID
from fastapi import APIRouter, Depends, status
from sqlalchemy import select
from sqlalchemy.ext.asyncio import AsyncSession
from lazy_bird.api.dependencies import get_async_database, get_token_from_header
from lazy_bird.core.config import settings
from lazy_bird.api.exceptions import AuthenticationError, ResourceConflictError
from lazy_bird.core.logging import get_logger
from lazy_bird.core.security import (
create_access_token,
create_refresh_token,
hash_password,
verify_password,
verify_refresh_token,
)
from lazy_bird.models.user import User
from lazy_bird.schemas.user import (
RefreshRequest,
TokenResponse,
UserCreate,
UserLogin,
UserResponse,
)
logger = get_logger(__name__)
# Create router
router = APIRouter(prefix="/auth", tags=["auth"])
@router.post("/register", response_model=UserResponse, status_code=status.HTTP_201_CREATED)
async def register(
user_data: UserCreate,
db: AsyncSession = Depends(get_async_database),
) -> UserResponse:
"""Register a new user.
**Request Body:**
- email: Valid email address (must be unique)
- password: Password (8-128 characters)
- display_name: Optional display name
**Returns:**
- 201 Created: User created successfully
- 409 Conflict: Email already registered
| [
"# fastapi/fastapi:fastapi/routing.py\nIRouter(r",
"# sqlalchemy/sqlalchemy:lib/sqlalchemy/sql/_selectable_constructors.py\nselect",
"# sqlalchemy/sqlalchemy:lib/sqlalchemy/ext/asyncio/session.py\nAsyncSession",
"# yusufkaraaslan/lazy-bird:lazy_bird/api/dependencies.py\nget_async_database",
"# yusufkaraasla... | yusufkaraaslan/lazy-bird | lazy_bird/api/routers/auth.py |
"""ApiKeys API endpoints for managing API authentication tokens.
This module provides CRUD operations for API keys with:
- Secure key generation (shown only once at creation)
- SHA-256 key hashing for storage
- Project-scoped or organization-level keys
- Scope-based permissions (read, write, admin)
- Key expiration and revocation
"""
from datetime import datetime, timezone
from typing import Optional
from uuid import UUID
from fastapi import APIRouter, Depends, Query, Response, status
from sqlalchemy import func, select
from sqlalchemy.ext.asyncio import AsyncSession
from lazy_bird.api.dependencies import RequireAdmin, RequireRead, get_async_database
from lazy_bird.api.exceptions import ResourceConflictError, ResourceNotFoundError
from lazy_bird.core.logging import get_logger
from lazy_bird.core.security import generate_api_key, get_api_key_prefix, hash_api_key
from lazy_bird.models.api_key import ApiKey
from lazy_bird.models.project import Project
from lazy_bird.schemas.api_key import (
ApiKeyCreate,
ApiKeyCreateResponse,
ApiKeyListResponse,
ApiKeyResponse,
ApiKeyUpdate,
)
logger = get_logger(__name__)
# Create router
router = APIRouter(prefix="/api-keys", tags=["api-keys"])
@router.get("", response_model=ApiKeyListResponse)
@router.get("/", response_model=ApiKeyListResponse)
async def list_api_keys(
# Pagination
page: int = Query(1, ge=1, description="Page number (1-indexed)"),
page_size: int = Query(20, ge=1, le=100, description="Items per page"),
# Filtering
project_id: Optional[UUID] = Query(
None, description="Filter by project ID (omit for organization-level keys)"
),
is_active: Optional[bool] = Query(None, description="Filter by active status"),
scope: Optional[str] = Query(None, description="Filter by scope (read, write, admin)"),
# Dependencies
db: AsyncSession = Depends(get_async_database),
api_key: ApiKey = Depends(RequireRead),
) -> ApiKeyListResponse:
"""List all API keys with pagination and filtering.
**Pagination:**
- Offset-based pagination with page and page_size
- Default: page=1, page_size=20
- Max page_size: 100
**Filtering:**
- `project_id`: UUID - Filter by project (NULL for org-level keys)
- `is_active`: boolean - Filter by active status
- `scope`: string - Filter by scope (read, write, admin)
**Returns:**
- List of API keys (only key_prefix shown, not full key)
- Total count, page info, pagination metadata
**Authentication:**
- Requires: API key with 'read', 'write', or 'admin' scope
**Security:**
- Full API keys are NEVER returned (only key_prefix)
- Keys are shown in full ONLY once during creation
"""
# Build base query
query = select(ApiKey)
# Apply filters
filters = []
# Filter by project ID
if project_id is not None:
filters.append(ApiKey.project_id == project_id)
# Filter by active status
if is_active is not None:
filters.append(ApiKey.is_active == is_active)
# Filter by scope (check if array contains the scope)
if scope:
filters.append(ApiKey.scopes.contains([scope]))
# Add all filters to query
if filters:
query = query.where(*filters)
# Get total count
count_query = select(func.count()).select_from(ApiKey)
if filters:
count_query = count_query.where(*filters)
total_result = await db.execute(count_query)
total = total_result.scalar() or 0
# Calculate pagination
offset = (page - 1) * page_size
pages = (total + page_size - 1) // page_size if total > 0 else 0
# Apply pagination and sorting (most recent first)
query = query.order_by(ApiKey.created_at.desc()).offset(offset).limit(page_size)
# Execute query
result = await db.execute(query)
keys = result.scalars().all()
# Convert to response models (key_prefix only, not full key!)
key_responses = [ApiKeyResponse.model_validate(k) for k in keys]
# Log successful query
logger.info(
f"Listed {len(keys)} API keys (page {page}/{pages})",
extra={
"extra_fields": {
"total": total,
"page": page,
"page_size": page_size,
"filters": {
"project_id": str(project_id) if project_id else None,
"is_active": is_active,
"scope": scope,
},
}
},
)
return ApiKeyListResponse(
items=key_responses,
total=total,
page=page,
page_size=page_size,
pages=pages,
)
@router.post("", response_model=ApiKeyCreateResponse, status_code=status.HTTP_201_CREATED)
@router.post("/", response_model=ApiKeyCreateResponse, status_code=status.HTTP_201_CREATED)
async def create_api_key(
key_data: ApiKeyCreate,
db: AsyncSession = Depends(get_async_database),
api_key: ApiKey = Depends(RequireAdmin),
) -> ApiKeyCreateResponse:
"""Create a new API key.
**Required Fields:**
- name: Human-readable key name
**Optional Fields:**
- project_id: Project ID (NULL for organization-level key)
- scopes: Array of scopes (default: ["read"])
- expires_at: Expiration timestamp (NULL for no expiration)
**Valid Scopes:**
- read: Read-only access
- write: Read and write access (includes read)
- admin: Full administrative access (includes read and write)
**Returns:**
- 201 Created: Key created successfully
- **IMPORTANT:** Full key is returned ONLY ONCE - save it securely!
- 404 Not Found: Project doesn't exist (if project_id provided)
- 422 Validation Error: Invalid input data
**Authentication:**
- Requires: API key with 'admin' scope
**Security:**
- Key is hashed with SHA-256 before storage
- Only key_prefix (first 8 chars) is stored in plain text
- Full key is shown ONLY in this response - cannot be recovered later
"""
# Validate project exists if project_id provided
if key_data.project_id:
project_query = select(Project).where(Project.id == key_data.project_id)
project_result = await db.execute(project_query)
project = project_result.scalar_one_or_none()
if not project:
raise ResourceNotFoundError(
resource_type="Project",
resource_id=str(key_data.project_id),
)
# Generate secure API key
raw_key = generate_api_key(prefix="lb")
key_hash = hash_api_key(raw_key)
key_prefix = get_api_key_prefix(raw_key)
# Create API key record
new_key = ApiKey(
key_hash=key_hash,
key_prefix=key_prefix,
name=key_data.name,
project_id=key_data.project_id,
scopes=key_data.scopes,
expires_at=key_data.expires_at,
is_active=True,
created_at=datetime.now(timezone.utc),
)
db.add(new_key)
await db.commit()
await db.refresh(new_key)
# Log successful creation (without sensitive data)
logger.info(
f"Created API key: {new_key.name} ({new_key.key_prefix})",
extra={
"extra_fields": {
"key_id": str(new_key.id),
"key_prefix": new_key.key_prefix,
"scopes": new_key.scopes,
"project_id": str(new_key.project_id) if new_key.project_id else None,
}
},
)
# Return response with FULL KEY (only time it's shown!)
response_data = {
"id": new_key.id,
"key_prefix": new_key.key_prefix,
"name": new_key.name,
"project_id": new_key.project_id,
"scopes": new_key.scopes,
"is_active": new_key.is_active,
"expires_at": new_key.expires_at,
"last_used_at": new_key.last_used_at,
"created_by": new_key.created_by,
"created_at": new_key.created_at,
"revoked_at": new_key.revoked_at,
"key": raw_key, # Full key shown only once!
}
return ApiKeyCreateResponse(**response_data)
@router.get("/{key_id}", response_model=ApiKeyResponse)
async def get_api_key(
key_id: UUID,
db: AsyncSession = Depends(get_async_database),
api_key: ApiKey = Depends(RequireRead),
) -> ApiKeyResponse:
"""Get a single API key by ID.
**Path Parameters:**
- key_id: UUID - Key identifier
**Returns:**
- Key details (only key_prefix shown, not full key)
**Errors:**
- 404 Not Found: Key doesn't exist
**Authentication:**
- Requires: API key with 'read', 'write', or 'admin' scope
**Security:**
- Full key is NEVER returned (only key_prefix)
- Key was shown in full only once during creation
"""
# Fetch key
query = select(ApiKey).where(ApiKey.id == key_id)
result = await db.execute(query)
key = result.scalar_one_or_none()
# Check if key exists
if not key:
raise ResourceNotFoundError(
resource_type="ApiKey",
resource_id=str(key_id),
)
# Log successful query
logger.info(
f"Retrieved API key: {key.name}",
extra={
"extra_fields": {
"key_id": str(key.id),
"key_prefix": key.key_prefix,
}
},
)
return ApiKeyResponse.model_validate(key)
@router.patch("/{key_id}", response_model=ApiKeyResponse)
async def update_api_key(
key_id: UUID,
update_data: ApiKeyUpdate,
db: AsyncSession = Depends(get_async_database),
api_key: ApiKey = Depends(RequireAdmin),
) -> ApiKeyResponse:
"""Update an existing API key (partial update).
**Path Parameters:**
- key_id: UUID - Key identifier
**Request Body (all optional):**
- name: Key name
- scopes: Permission scopes array
- expires_at: Expiration timestamp
- is_active: Active status (set to false to revoke key)
**Returns:**
- Updated key
**Errors:**
- 404 Not Found: Key doesn't exist
- 422 Validation Error: Invalid field values
**Authentication:**
- Requires: API key with 'admin' scope
**Note:**
- Set `is_active=false` to revoke a key (cannot be un-revoked)
- Cannot change project_id after creation
- Cannot recover the full key (only prefix is stored)
"""
# Fetch existing key
query = select(ApiKey).where(ApiKey.id == key_id)
result = await db.execute(query)
key = result.scalar_one_or_none()
# Check if key exists
if not key:
raise ResourceNotFoundError(
resource_type="ApiKey",
resource_id=str(key_id),
)
# Get update data (only fields that were provided)
update_fields = update_data.model_dump(exclude_unset=True)
# If no fields to update, return existing key
if not update_fields:
logger.info(f"No fields to update for API key: {key.name}")
return ApiKeyResponse.model_validate(key)
# Apply updates
for field, value in update_fields.items():
setattr(key, field, value)
# If key is being revoked, set revoked_at timestamp
if update_fields.get("is_active") is False and not key.revoked_at:
key.revoked_at = datetime.now(timezone.utc)
# Commit changes
await db.commit()
await db.refresh(key)
# Log successful update
logger.info(
f"Updated API key: {key.name}",
extra={
"extra_fields": {
"key_id": str(key.id),
"updated_fields": list(update_fields.keys()),
}
},
)
return ApiKeyResponse.model_validate(key)
@router.delete("/{key_id}", status_code=status.HTTP_204_NO_CONTENT)
async def delete_api_key(
key_id: UUID,
db: AsyncSession = Depends(get_async_database),
api_key: ApiKey = Depends(RequireAdmin),
) -> Response:
"""Delete an API key.
**Path Parameters:**
- key_id: UUID - Key identifier
**Behavior:**
- Hard delete (permanently removes record)
- Immediately invalidates the key
**Returns:**
- 204 No Content: Key deleted successfully
**Errors:**
- 404 Not Found: Key doesn't exist
**Authentication:**
- Requires: API key with 'admin' scope
**Warning:**
- This is a hard delete - key cannot be recovered
- Consider setting `is_active=false` instead for soft revocation
"""
# Fetch existing key
query = select(ApiKey).where(ApiKey.id == key_id)
result = await db.execute(query)
key = result.scalar_one_or_none()
# Check if key exists
if not key:
raise ResourceNotFoundError(
resource_type="ApiKey",
resource_id=str(key_id),
)
# Delete the key
await db.delete(key)
await db.commit()
# Log deletion
logger.info(
f"Deleted API key: {key.name}",
extra={
"extra_fields": {
"key_id": str(key_id),
"key_prefix": key.key_prefix,
}
},
)
# Return 204 No Content (no response body)
return Response(status_code=status.HTTP_204_NO_CONTENT)
| """ApiKeys API endpoints for managing API authentication tokens.
This module provides CRUD operations for API keys with:
- Secure key generation (shown only once at creation)
- SHA-256 key hashing for storage
- Project-scoped or organization-level keys
- Scope-based permissions (read, write, admin)
- Key expiration and revocation
"""
from datetime import datetime, timezone
from typing import Optional
from uuid import UUID
from fastapi import APIRouter, Depends, Query, Response, status
from sqlalchemy import func, select
from sqlalchemy.ext.asyncio import AsyncSession
from lazy_bird.api.dependencies import RequireAdmin, RequireRead, get_async_database
from lazy_bird.api.exceptions import ResourceConflictError, ResourceNotFoundError
from lazy_bird.core.logging import get_logger
from lazy_bird.core.security import generate_api_key, get_api_key_prefix, hash_api_key
from lazy_bird.models.api_key import ApiKey
from lazy_bird.models.project import Project
from lazy_bird.schemas.api_key import (
ApiKeyCreate,
ApiKeyCreateResponse,
ApiKeyListResponse,
ApiKeyResponse,
ApiKeyUpdate,
)
logger = get_logger(__name__)
# Create router
router = APIRouter(prefix="/api-keys", tags=["api-keys"])
@router.get("", response_model=ApiKeyListResponse)
@router.get("/", response_model=ApiKeyListResponse)
async def list_api_keys(
# Pagination
page: int = Query(1, ge=1, description="Page number (1-indexed)"),
page_size: int = Query(20, ge=1, le=100, description="Items per page"),
# Filtering
project_id: Optional[UUID] = Query(
None, description="Filter by | [
"# fastapi/fastapi:fastapi/routing.py\nIRouter(r",
"# sqlalchemy/sqlalchemy:lib/sqlalchemy/ext/asyncio/session.py\nAsyncSession",
"# yusufkaraaslan/lazy-bird:lazy_bird/api/exceptions.py\nResourceConflictError",
"# yusufkaraaslan/lazy-bird:lazy_bird/core/logging.py\nget_logger",
"# yusufkaraaslan/lazy-bird:l... | yusufkaraaslan/lazy-bird | lazy_bird/api/routers/api_keys.py |
"""API routers for Lazy-Bird endpoints.
This package contains all API route handlers organized by resource:
- health: Health check and monitoring endpoints
- projects: Project management
- claude_accounts: Claude account configuration
- framework_presets: Framework preset management
- task_runs: Task execution and monitoring
- webhooks: Webhook subscription management
- api_keys: API key management
- auth: Authentication endpoints
"""
from lazy_bird.api.routers.api_keys import router as api_keys_router
from lazy_bird.api.routers.auth import router as auth_router
from lazy_bird.api.routers.claude_accounts import router as claude_accounts_router
from lazy_bird.api.routers.framework_presets import router as framework_presets_router
from lazy_bird.api.routers.health import router as health_router
from lazy_bird.api.routers.projects import router as projects_router
from lazy_bird.api.routers.task_runs import router as task_runs_router
from lazy_bird.api.routers.webhooks import router as webhooks_router
__all__ = [
"api_keys_router",
"auth_router",
"claude_accounts_router",
"framework_presets_router",
"health_router",
"projects_router",
"task_runs_router",
"webhooks_router",
]
| """API routers for Lazy-Bird endpoints.
This package contains all API route handlers organized by resource:
- health: Health check and monitoring endpoints
- projects: Project management
- claude_accounts: Claude account configuration
- framework_presets: Framework preset management
- task_runs: Task execution and monitoring
- webhooks: Webhook subscription management
- api_keys: API key management
- auth: Authentication endpoints
"""
from lazy_bird.api.routers.api_keys import router as api_keys_router
from lazy_bird.api.routers.auth import router as auth_router
from lazy_bird.api.routers.claude_accounts import router as claude_accounts_router
from lazy_bird.api.routers.framework_presets import router as framework_presets_router
from lazy_bird.api.routers.health import router as health_router
from lazy_bird.api.routers.projects import router as projects_router
from lazy_bird.api.routers.task_runs import router as task_runs_router
from lazy_bird.api.routers.webhooks import router as webhooks_router
__all__ = [
"api_keys_router",
"auth_router",
"claude_accounts_router",
"framework_presets_router",
"health_router",
"projects_router",
"task_runs_router",
"webhooks_router",
]
| [] | yusufkaraaslan/lazy-bird | lazy_bird/api/routers/__init__.py |
"""Middleware components for FastAPI application.
This module provides middleware for:
- CORS configuration
- Request logging with correlation IDs
- Error handling and exception tracking
- Request ID generation
"""
import time
import uuid
from typing import Callable
from fastapi import Request, Response, status
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import JSONResponse
from starlette.middleware.base import BaseHTTPMiddleware
from starlette.types import ASGIApp
from lazy_bird.core.config import settings
from lazy_bird.core.logging import (
clear_correlation_id,
get_logger,
log_with_context,
set_correlation_id,
)
logger = get_logger(__name__)
def setup_cors(app: ASGIApp) -> None:
    """Attach CORS middleware to the application.

    Args:
        app: FastAPI application instance

    Example:
        >>> from fastapi import FastAPI
        >>> app = FastAPI()
        >>> setup_cors(app)
    """
    allowed_origins = settings.CORS_ORIGINS
    app.add_middleware(
        CORSMiddleware,
        allow_origins=allowed_origins,
        allow_credentials=True,
        allow_methods=["*"],  # every HTTP verb (GET, POST, PUT, DELETE, ...)
        allow_headers=["*"],  # every request header
        expose_headers=["X-Request-ID", "X-Correlation-ID"],
    )
    logger.info(
        "CORS middleware configured", extra={"extra_fields": {"origins": allowed_origins}}
    )
class RequestIDMiddleware(BaseHTTPMiddleware):
    """Attach a unique request ID to every request.

    The ID (client-supplied X-Request-ID, or a freshly generated UUID) is
    exposed in three places:
    - Request state (request.state.request_id)
    - Response headers (X-Request-ID, X-Correlation-ID)
    - Logging context (correlation_id)
    """

    async def dispatch(self, request: Request, call_next: Callable) -> Response:
        """Assign the request ID, run the handler, and tag the response.

        Args:
            request: Incoming request
            call_next: Next middleware/route handler

        Returns:
            Response: Response with X-Request-ID header
        """
        # Reuse the client's ID when present; otherwise mint a new UUID.
        request_id = request.headers.get("X-Request-ID") or str(uuid.uuid4())
        request.state.request_id = request_id
        set_correlation_id(request_id)
        try:
            response = await call_next(request)
        finally:
            # Always drop the correlation ID, even if the handler raised.
            clear_correlation_id()
        response.headers["X-Request-ID"] = request_id
        response.headers["X-Correlation-ID"] = request_id
        return response
class RequestLoggingMiddleware(BaseHTTPMiddleware):
    """Middleware to log all HTTP requests and responses.

    Logs:
    - Request method, path, query params
    - Response status code
    - Response time (also echoed in the X-Response-Time header)
    - Client IP address
    """

    async def dispatch(self, request: Request, call_next: Callable) -> Response:
        """Process and log request.

        Args:
            request: Incoming request
            call_next: Next middleware/route handler

        Returns:
            Response: Response from route handler
        """
        # Start timer
        start_time = time.time()
        # Extract request info; request_id is set by RequestIDMiddleware
        # when that middleware runs earlier in the stack.
        request_id = getattr(request.state, "request_id", "unknown")
        client_ip = request.client.host if request.client else "unknown"
        method = request.method
        path = request.url.path
        query_params = str(request.query_params) if request.query_params else None
        # Log incoming request
        log_with_context(
            logger,
            20,  # INFO level (logging.INFO)
            f"{method} {path}",
            request_id=request_id,
            client_ip=client_ip,
            method=method,
            path=path,
            query_params=query_params,
        )
        # Process request
        try:
            response = await call_next(request)
        except Exception as e:
            # Log error and re-raise so outer handlers still see it.
            duration = time.time() - start_time
            log_with_context(
                logger,
                40,  # ERROR level (logging.ERROR)
                f"{method} {path} - Exception: {str(e)}",
                request_id=request_id,
                client_ip=client_ip,
                method=method,
                path=path,
                duration_ms=round(duration * 1000, 2),
                error=str(e),
                error_type=type(e).__name__,
            )
            raise
        # Calculate response time
        duration = time.time() - start_time
        # Log response
        log_with_context(
            logger,
            20,  # INFO level (logging.INFO)
            f"{method} {path} - {response.status_code}",
            request_id=request_id,
            client_ip=client_ip,
            method=method,
            path=path,
            status_code=response.status_code,
            duration_ms=round(duration * 1000, 2),
        )
        # Add response time header
        response.headers["X-Response-Time"] = f"{round(duration * 1000, 2)}ms"
        return response
class ErrorHandlingMiddleware(BaseHTTPMiddleware):
    """Middleware to catch and handle uncaught exceptions.

    Converts unhandled exceptions into proper JSON error responses.
    Logs all errors with full context. Detailed error info is only
    exposed when settings.DEBUG is enabled.
    """

    async def dispatch(self, request: Request, call_next: Callable) -> Response:
        """Process request and handle errors.

        Args:
            request: Incoming request
            call_next: Next middleware/route handler

        Returns:
            Response: Response from handler or a 500 JSON error response
        """
        try:
            return await call_next(request)
        except Exception as exc:
            # Get request ID for error tracking (set by RequestIDMiddleware
            # when it runs earlier in the stack).
            request_id = getattr(request.state, "request_id", "unknown")
            # Log the error
            log_with_context(
                logger,
                40,  # ERROR level (logging.ERROR)
                f"Unhandled exception: {str(exc)}",
                request_id=request_id,
                error=str(exc),
                error_type=type(exc).__name__,
                method=request.method,
                path=request.url.path,
            )
            # Return JSON error response
            if settings.DEBUG:
                # Include detailed error info in debug mode
                error_detail = {
                    "error": type(exc).__name__,
                    "message": str(exc),
                    "request_id": request_id,
                }
            else:
                # Generic error in production — never leak internals
                error_detail = {
                    "error": "Internal Server Error",
                    "message": "An unexpected error occurred",
                    "request_id": request_id,
                }
            return JSONResponse(
                status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
                content=error_detail,
                headers={
                    "X-Request-ID": request_id,
                },
            )
class SecurityHeadersMiddleware(BaseHTTPMiddleware):
    """Middleware to add security headers to all responses.

    Adds:
    - X-Content-Type-Options: nosniff
    - X-Frame-Options: DENY
    - X-XSS-Protection: 1; mode=block
    - Strict-Transport-Security (HSTS) in production
    """

    async def dispatch(self, request: Request, call_next: Callable) -> Response:
        """Add security headers to response.

        Args:
            request: Incoming request
            call_next: Next middleware/route handler

        Returns:
            Response: Response with security headers
        """
        response = await call_next(request)
        # Headers applied to every response, regardless of environment.
        hardening = {
            "X-Content-Type-Options": "nosniff",
            "X-Frame-Options": "DENY",
            "X-XSS-Protection": "1; mode=block",
        }
        # HSTS only outside development/testing (DEBUG off).
        if not settings.DEBUG:
            hardening["Strict-Transport-Security"] = "max-age=31536000; includeSubDomains"
        for header_name, header_value in hardening.items():
            response.headers[header_name] = header_value
        return response
class RateLimitMiddleware(BaseHTTPMiddleware):
    """Middleware for rate limiting requests.

    Implements a fixed-window counter in Redis for distributed rate
    limiting. Tracks requests per IP address and/or API key.
    """

    def __init__(self, app: ASGIApp, requests_per_minute: int = 60):
        """Initialize rate limiter.

        Args:
            app: ASGI application
            requests_per_minute: Max requests per minute (default: 60)
        """
        super().__init__(app)
        self.requests_per_minute = requests_per_minute
        self.window_seconds = 60

    async def dispatch(self, request: Request, call_next: Callable) -> Response:
        """Check rate limit and process request.

        Args:
            request: Incoming request
            call_next: Next middleware/route handler

        Returns:
            Response: Response from handler or 429 if rate limited
        """
        # Get client identifier (IP or API key)
        client_ip = request.client.host if request.client else "unknown"
        api_key = request.headers.get("X-API-Key", "")
        # Use API key if present, otherwise IP
        identifier = api_key[:16] if api_key else client_ip
        # Check rate limit using Redis
        try:
            from lazy_bird.core.redis import get_redis

            redis_client = get_redis()
            key = f"rate_limit:{identifier}"
            # BUG FIX: the previous read-check-increment sequence was not
            # atomic — concurrent requests could all read a count below the
            # limit and all pass, and the EXPIRE (set only when the pre-read
            # was empty) raced on window start. INCR is atomic, so each
            # request now gets a unique count; note this also means
            # rejected requests consume the counter (standard fixed-window
            # behavior).
            pipe = redis_client.pipeline()
            pipe.incr(key)
            pipe.ttl(key)
            new_count, ttl = pipe.execute()
            new_count = int(new_count)
            if int(ttl) < 0:
                # Key has no expiry yet: this request opened the window.
                redis_client.expire(key, self.window_seconds)
            if new_count > self.requests_per_minute:
                # Rate limit exceeded
                log_with_context(
                    logger,
                    30,  # WARNING level
                    f"Rate limit exceeded for {identifier}",
                    client_ip=client_ip,
                    current_requests=new_count,
                    limit=self.requests_per_minute,
                )
                return JSONResponse(
                    status_code=status.HTTP_429_TOO_MANY_REQUESTS,
                    content={
                        "error": "Rate limit exceeded",
                        "message": f"Maximum {self.requests_per_minute} requests per minute",
                        "retry_after": self.window_seconds,
                    },
                    headers={
                        "Retry-After": str(self.window_seconds),
                        "X-RateLimit-Limit": str(self.requests_per_minute),
                        "X-RateLimit-Remaining": "0",
                        "X-RateLimit-Reset": str(self.window_seconds),
                    },
                )
            remaining = max(0, self.requests_per_minute - new_count)
        except Exception as e:
            # If Redis fails, allow request (fail open)
            logger.warning(f"Rate limiting unavailable: {e}")
            remaining = self.requests_per_minute
        # Process request
        response = await call_next(request)
        # Add rate limit headers
        response.headers["X-RateLimit-Limit"] = str(self.requests_per_minute)
        response.headers["X-RateLimit-Remaining"] = str(remaining)
        return response
def setup_middleware(app: ASGIApp) -> None:
    """Configure all middleware for the application.

    Order matters! Middleware is executed in reverse order of addition.
    Last added = outermost layer = executes first on request, last on response.

    Execution order (request):
    1. SecurityHeadersMiddleware
    2. RateLimitMiddleware
    3. ErrorHandlingMiddleware
    4. RequestLoggingMiddleware
    5. RequestIDMiddleware
    6. CORS (if applicable)
    7. Route handler

    Args:
        app: FastAPI application instance

    Example:
        >>> from fastapi import FastAPI
        >>> app = FastAPI()
        >>> setup_middleware(app)
    """
    # CORS - added first, so it ends up as the innermost layer, running
    # closest to the route handler (see execution order above).
    setup_cors(app)
    # Request ID - establishes correlation ID for logging
    app.add_middleware(RequestIDMiddleware)
    # Request logging - logs with correlation ID
    app.add_middleware(RequestLoggingMiddleware)
    # Error handling - catches all unhandled exceptions
    app.add_middleware(ErrorHandlingMiddleware)
    # Rate limiting - prevent abuse
    app.add_middleware(RateLimitMiddleware, requests_per_minute=settings.RATE_LIMIT_PER_MINUTE)
    # Security headers - added last, so outermost: applied to every response
    app.add_middleware(SecurityHeadersMiddleware)
    logger.info("All middleware configured successfully")
| """Middleware components for FastAPI application.
This module provides middleware for:
- CORS configuration
- Request logging with correlation IDs
- Error handling and exception tracking
- Request ID generation
"""
import time
import uuid
from typing import Callable
from fastapi import Request, Response, status
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import JSONResponse
from starlette.middleware.base import BaseHTTPMiddleware
from starlette.types import ASGIApp
from lazy_bird.core.config import settings
from lazy_bird.core.logging import (
clear_correlation_id,
get_logger,
log_with_context,
set_correlation_id,
)
logger = get_logger(__name__)
def setup_cors(app: ASGIApp) -> None:
    """Configure CORS middleware for the application.

    Args:
        app: FastAPI application instance

    Example:
        >>> from fastapi import FastAPI
        >>> app = FastAPI()
        >>> setup_cors(app)
    """
    # Collect the CORS policy in one place, then register it.
    cors_policy = dict(
        allow_origins=settings.CORS_ORIGINS,
        allow_credentials=True,
        allow_methods=["*"],  # Allow all methods (GET, POST, PUT, DELETE, etc.)
        allow_headers=["*"],  # Allow all headers
        expose_headers=["X-Request-ID", "X-Correlation-ID"],
    )
    app.add_middleware(CORSMiddleware, **cors_policy)
    logger.info(
        "CORS middleware configured", extra={"extra_fields": {"origins": settings.CORS_ORIGINS}}
    )
class | [
"# Kludex/starlette:starlette/middleware/base.py\nBaseHTTPMiddleware",
"# yusufkaraaslan/lazy-bird:lazy_bird/core/logging.py\nclear_correlation_id",
"# fastapi/fastapi:fastapi/openapi/models.py\nResponse"
] | yusufkaraaslan/lazy-bird | lazy_bird/api/middleware.py |
"""Main FastAPI application for Lazy-Bird v2.0.
This module initializes the FastAPI application with:
- Middleware configuration (CORS, logging, error handling)
- OpenAPI documentation
- Lifecycle events (startup/shutdown)
- API routers
"""
from contextlib import asynccontextmanager
from typing import AsyncGenerator
from fastapi import FastAPI
from fastapi.responses import JSONResponse
from lazy_bird.api.middleware import setup_middleware
from lazy_bird.core.config import settings
from lazy_bird.core.database import (
check_async_db_connection,
check_db_connection,
drop_async_db,
drop_db,
init_async_db,
init_db,
)
from lazy_bird.core.logging import get_logger, setup_logging
from lazy_bird.core.redis import (
check_async_redis_connection,
check_redis_connection,
close_async_redis,
close_redis,
)
from lazy_bird.services.preset_seeder import seed_framework_presets
logger = get_logger(__name__)
@asynccontextmanager
async def lifespan(app: FastAPI) -> AsyncGenerator[None, None]:
    """Application lifespan manager.

    Handles startup and shutdown tasks:
    - Database initialization and connection checks
    - Redis connection checks
    - Resource cleanup on shutdown

    Args:
        app: FastAPI application instance

    Yields:
        None
    """
    # Startup tasks
    logger.info("Starting Lazy-Bird API v2.0...")
    # Initialize logging first
    setup_logging()
    logger.info("Logging configured")
    # Initialize databases; failures are logged but deliberately non-fatal
    # so the process still starts and can be diagnosed.
    try:
        if settings.USE_ASYNC_DB:
            logger.info("Initializing async database...")
            init_async_db()
            if await check_async_db_connection():
                logger.info("✓ Async database connection successful")
            else:
                logger.error("✗ Async database connection failed")
        else:
            logger.info("Initializing sync database...")
            init_db()
            if check_db_connection():
                logger.info("✓ Database connection successful")
            else:
                logger.error("✗ Database connection failed")
    except Exception as e:
        logger.error(f"Database initialization failed: {e}")
    # Seed framework presets from YAML (async DB mode only)
    try:
        if settings.USE_ASYNC_DB:
            from lazy_bird.core.database import AsyncSessionLocal

            async with AsyncSessionLocal() as session:
                created, updated = await seed_framework_presets(session)
                logger.info(f"Framework presets seeded: {created} created, {updated} updated")
        else:
            logger.info(
                "Skipping async preset seeding (sync DB mode); " "presets can be seeded manually"
            )
    except Exception as e:
        logger.warning(f"Framework preset seeding failed (non-fatal): {e}")
    # Check Redis connection (warning only: the app runs without it)
    try:
        if settings.USE_ASYNC_DB:
            if await check_async_redis_connection():
                logger.info("✓ Async Redis connection successful")
            else:
                logger.warning("✗ Async Redis connection failed (Celery may not work)")
        else:
            if check_redis_connection():
                logger.info("✓ Redis connection successful")
            else:
                logger.warning("✗ Redis connection failed (Celery may not work)")
    except Exception as e:
        logger.warning(f"Redis connection check failed: {e}")
    logger.info("Lazy-Bird API v2.0 started successfully")
    logger.info(f"Environment: {settings.ENVIRONMENT}")
    logger.info(f"Debug mode: {settings.DEBUG}")
    logger.info(f"API documentation: http://{settings.HOST}:{settings.PORT}/docs")
    # Yield control to the application
    yield
    # Shutdown tasks
    logger.info("Shutting down Lazy-Bird API v2.0...")
    # Close Redis connections
    try:
        if settings.USE_ASYNC_DB:
            await close_async_redis()
            logger.info("Async Redis connection closed")
        else:
            close_redis()
            logger.info("Redis connection closed")
    except Exception as e:
        logger.error(f"Error closing Redis connection: {e}")
    # Drop database connections if in test mode
    if settings.ENVIRONMENT == "testing":
        try:
            if settings.USE_ASYNC_DB:
                drop_async_db()
                logger.info("Async database connections dropped")
            else:
                drop_db()
                logger.info("Database connections dropped")
        except Exception as e:
            logger.error(f"Error dropping database connections: {e}")
    logger.info("Lazy-Bird API v2.0 shutdown complete")
# Create FastAPI application instance.
# NOTE(review): the description below is a runtime string; its exact
# indentation could not be recovered from this view — confirm against VCS.
app = FastAPI(
    title=settings.API_TITLE,
    description="""
**Lazy-Bird v2.0** - Progressive Development Automation System
Enables Claude Code instances to work on software development tasks autonomously.
## Features
- 🤖 **Autonomous Task Execution**: Claude Code agents execute development tasks
- 📊 **Multi-Project Support**: Manage multiple projects from a single server
- 🔄 **Framework Agnostic**: Support for 15+ frameworks (Godot, Django, React, etc.)
- ✅ **Automated Testing**: Test runners for each framework
- 🔐 **Secure**: API key authentication, JWT tokens, encryption
- 📈 **Scalable**: From solo dev (Phase 1) to enterprise (Phase 6)
- 🌐 **RESTful API**: Complete REST API for all operations
- 📡 **Webhooks**: Event-driven notifications
- 💰 **Cost Tracking**: Claude API usage monitoring
## API Sections
- **Projects**: Manage development projects
- **Claude Accounts**: Configure Claude API or subscription accounts
- **Framework Presets**: Built-in framework configurations
- **Task Runs**: Execute and monitor automated tasks
- **Webhooks**: Event subscriptions
- **API Keys**: Authentication key management
- **Health**: System health and status endpoints
## Authentication
Most endpoints require authentication via:
- **API Key**: `X-API-Key` header
- **JWT Token**: `Authorization: Bearer <token>` header
## Links
- [GitHub Repository](https://github.com/yusyus/lazy-bird)
- [Documentation](https://github.com/yusyus/lazy-bird/tree/main/Docs)
- [Issue Tracker](https://github.com/yusyus/lazy-bird/issues)
""",
    version=settings.API_VERSION,
    openapi_url="/api/v1/openapi.json",
    docs_url="/docs",
    redoc_url="/redoc",
    lifespan=lifespan,
    # OpenAPI metadata
    contact={
        "name": "Lazy-Bird Team",
        "url": "https://github.com/yusyus/lazy-bird",
        "email": "support@lazy-bird.dev",
    },
    license_info={
        "name": "MIT License",
        "url": "https://github.com/yusyus/lazy-bird/blob/main/LICENSE",
    },
    # Tag metadata groups endpoints in the generated OpenAPI docs.
    openapi_tags=[
        {
            "name": "health",
            "description": "System health and status endpoints",
        },
        {
            "name": "projects",
            "description": "Project management operations",
        },
        {
            "name": "claude-accounts",
            "description": "Claude account configuration",
        },
        {
            "name": "framework-presets",
            "description": "Framework preset management",
        },
        {
            "name": "task-runs",
            "description": "Task execution and monitoring",
        },
        {
            "name": "webhooks",
            "description": "Webhook subscription management",
        },
        {
            "name": "api-keys",
            "description": "API key management",
        },
        {
            "name": "auth",
            "description": "Authentication endpoints",
        },
    ],
)
# Setup middleware
setup_middleware(app)
# Register exception handlers
from lazy_bird.api.exceptions import register_exception_handlers

register_exception_handlers(app)
# Import routers (imported here, after app creation — presumably to avoid
# circular imports; confirm before hoisting to the top of the file)
from lazy_bird.api.routers import (
    api_keys_router,
    auth_router,
    claude_accounts_router,
    framework_presets_router,
    health_router,
    projects_router,
    task_runs_router,
    webhooks_router,
)

# Include API routers (all mounted under the /api/v1 prefix)
app.include_router(health_router, prefix="/api/v1")
app.include_router(projects_router, prefix="/api/v1")
app.include_router(task_runs_router, prefix="/api/v1")
app.include_router(claude_accounts_router, prefix="/api/v1")
app.include_router(framework_presets_router, prefix="/api/v1")
app.include_router(api_keys_router, prefix="/api/v1")
app.include_router(webhooks_router, prefix="/api/v1")
app.include_router(auth_router, prefix="/api/v1")
# Root endpoint
@app.get("/", tags=["health"])
async def root() -> JSONResponse:
    """Root endpoint returning API information.

    Returns:
        JSONResponse: API name, version, and documentation links
    """
    # Assemble the payload first, then wrap it in a JSON response.
    api_info = {
        "name": settings.API_TITLE,
        "version": settings.API_VERSION,
        "status": "operational",
        "environment": settings.ENVIRONMENT,
        "docs": "/docs",
        "redoc": "/redoc",
        "openapi": "/api/v1/openapi.json",
    }
    return JSONResponse(content=api_info)
if __name__ == "__main__":
    import uvicorn

    # Run with uvicorn when executed directly; auto-reload follows DEBUG.
    uvicorn.run(
        "lazy_bird.api.main:app",
        host=settings.HOST,
        port=settings.PORT,
        reload=settings.DEBUG,
        log_level=settings.LOG_LEVEL.lower(),
    )
| """Main FastAPI application for Lazy-Bird v2.0.
This module initializes the FastAPI application with:
- Middleware configuration (CORS, logging, error handling)
- OpenAPI documentation
- Lifecycle events (startup/shutdown)
- API routers
"""
from contextlib import asynccontextmanager
from typing import AsyncGenerator
from fastapi import FastAPI
from fastapi.responses import JSONResponse
from lazy_bird.api.middleware import setup_middleware
from lazy_bird.core.config import settings
from lazy_bird.core.database import (
check_async_db_connection,
check_db_connection,
drop_async_db,
drop_db,
init_async_db,
init_db,
)
from lazy_bird.core.logging import get_logger, setup_logging
from lazy_bird.core.redis import (
check_async_redis_connection,
check_redis_connection,
close_async_redis,
close_redis,
)
from lazy_bird.services.preset_seeder import seed_framework_presets
logger = get_logger(__name__)
@asynccontextmanager
async def lifespan(app: FastAPI) -> AsyncGenerator[None, None]:
"""Application lifespan manager.
Handles startup and shutdown tasks:
- Database initialization and connection checks
- Redis connection checks
- Resource cleanup on shutdown
Args:
app: FastAPI application instance
Yields:
None
"""
# Startup tasks
logger.info("Starting Lazy-Bird API v2.0...")
# Initialize logging first
setup_logging()
logger.info("Logging configured")
# Initialize databases
try:
| [
"# fastapi/fastapi:fastapi/applications.py\nFastAPI",
"# yusufkaraaslan/lazy-bird:lazy_bird/api/middleware.py\nsetup_middleware",
"# yusufkaraaslan/lazy-bird:lazy_bird/core/database.py\ncheck_async_db_connection",
"# yusufkaraaslan/lazy-bird:lazy_bird/core/logging.py\nget_logger",
"# yusufkaraaslan/lazy-bir... | yusufkaraaslan/lazy-bird | lazy_bird/api/main.py |
"""Custom exceptions and error handlers for Lazy-Bird API.
This module implements RFC 7807 Problem Details for HTTP APIs.
See: https://tools.ietf.org/html/rfc7807
"""
from typing import Any, Dict, Optional
from fastapi import HTTPException, Request, status
from fastapi.responses import JSONResponse
from pydantic import BaseModel, Field
from lazy_bird.core.logging import get_logger
logger = get_logger(__name__)
# -------------------------------------------------------------------------
# RFC 7807 Problem Details Schema
# -------------------------------------------------------------------------
class ProblemDetails(BaseModel):
    """RFC 7807 Problem Details schema.

    Example:
        {
            "type": "https://lazy-bird.dev/errors/validation-error",
            "title": "Validation Error",
            "status": 422,
            "detail": "The 'name' field is required",
            "instance": "/api/v1/projects",
            "request_id": "550e8400-e29b-41d4-a716-446655440000",
            "errors": {...}
        }
    """

    # --- RFC 7807 standard members ---
    type: str = Field(
        ...,
        description="A URI reference that identifies the problem type",
        examples=["https://lazy-bird.dev/errors/validation-error"],
    )
    title: str = Field(
        ...,
        description="A short, human-readable summary of the problem type",
        examples=["Validation Error"],
    )
    status: int = Field(
        ...,
        ge=400,
        le=599,
        description="The HTTP status code",
        examples=[422],
    )
    detail: str = Field(
        ...,
        description="A human-readable explanation specific to this occurrence",
        examples=["The 'name' field is required"],
    )
    instance: Optional[str] = Field(
        default=None,
        description="A URI reference that identifies the specific occurrence",
        examples=["/api/v1/projects"],
    )
    # Extension members (non-standard)
    request_id: Optional[str] = Field(
        default=None,
        description="Request ID for tracking",
    )
    errors: Optional[Dict[str, Any]] = Field(
        default=None,
        description="Additional error details (validation errors, etc.)",
    )
# -------------------------------------------------------------------------
# Custom Exception Classes
# -------------------------------------------------------------------------
class LazyBirdException(HTTPException):
    """Base exception for Lazy-Bird API errors.

    All custom exceptions inherit from this class.
    """

    def __init__(
        self,
        status_code: int,
        detail: str,
        error_type: str = "about:blank",
        title: Optional[str] = None,
        errors: Optional[Dict[str, Any]] = None,
    ):
        """Initialize exception.

        Args:
            status_code: HTTP status code
            detail: Human-readable error message
            error_type: URI identifying error type (RFC 7807)
            title: Short summary of error type
            errors: Additional error details
        """
        # Initialize parent with RFC 7807 headers
        super().__init__(
            status_code=status_code,
            detail=detail,
            headers={"Content-Type": "application/problem+json"},
        )
        self.error_type = error_type
        self.title = title or self._default_title(status_code)
        self.errors = errors

    @staticmethod
    def _default_title(status_code: int) -> str:
        """Get default title for status code.

        Args:
            status_code: HTTP status code

        Returns:
            str: Default title ("Error" for unmapped codes)
        """
        titles = {
            400: "Bad Request",
            401: "Unauthorized",
            403: "Forbidden",
            404: "Not Found",
            409: "Conflict",
            422: "Unprocessable Entity",
            429: "Too Many Requests",
            500: "Internal Server Error",
            503: "Service Unavailable",
        }
        return titles.get(status_code, "Error")

    def to_problem_details(self) -> Dict[str, Any]:
        """Convert exception to RFC 7807 Problem Details dict.

        Returns:
            Dict[str, Any]: RFC 7807 formatted problem details
        """
        # BUG FIX: previously hard-coded "type": "about:blank", discarding
        # the error_type stored at construction time. RFC 7807 expects the
        # specific problem-type URI here, matching the class docstring and
        # the exception handlers, which read exc.error_type.
        problem = {
            "type": self.error_type,
            "title": self.title,
            "status": self.status_code,
            "detail": self.detail,
        }
        # Merge errors dict into top level (flatten structure)
        if self.errors:
            problem.update(self.errors)
        return problem
class ValidationError(LazyBirdException):
    """Validation error (422)."""

    def __init__(
        self,
        detail: str,
        field: Optional[str] = None,
        reason: Optional[str] = None,
        errors: Optional[Dict[str, Any]] = None,
    ):
        """Initialize validation error.

        Args:
            detail: Human-readable error message
            field: Field name that failed validation
            reason: Reason for validation failure
            errors: Field-level validation errors (alternative to field/reason)
        """
        # Kept as attributes so callers/tests can introspect the failure.
        self.field = field
        self.reason = reason
        # Derive the errors payload from field/reason unless the caller
        # supplied one explicitly.
        if errors is None and field is not None:
            errors = {"field": field}
            if reason is not None:
                errors["reason"] = reason
        super().__init__(
            status_code=status.HTTP_422_UNPROCESSABLE_ENTITY,
            detail=detail,
            error_type="https://lazy-bird.dev/errors/validation-error",
            title="Unprocessable Entity",
            errors=errors,
        )
class ResourceNotFoundError(LazyBirdException):
    """Resource not found (404)."""

    def __init__(
        self,
        resource_type: str,
        resource_id: str,
        detail: Optional[str] = None,
    ):
        """Initialize not found error.

        Args:
            resource_type: Type of resource (e.g., "Project", "ApiKey")
            resource_id: ID of resource that wasn't found
            detail: Optional custom detail message
        """
        # Kept as attributes so callers/tests can introspect the failure.
        self.resource_type = resource_type
        self.resource_id = resource_id
        # Generate a default message when no custom detail is supplied.
        message = (
            detail
            if detail is not None
            else f"{resource_type} with ID '{resource_id}' not found"
        )
        super().__init__(
            status_code=status.HTTP_404_NOT_FOUND,
            detail=message,
            error_type="https://lazy-bird.dev/errors/not-found",
            title="Not Found",
            errors={"resource_type": resource_type, "resource_id": resource_id},
        )
class ResourceConflictError(LazyBirdException):
    """Resource conflict (409)."""

    def __init__(
        self,
        detail: str,
        conflict_field: Optional[str] = None,
        conflict_value: Optional[str] = None,
    ):
        """Initialize conflict error.

        Args:
            detail: Human-readable error message
            conflict_field: Field that caused the conflict
            conflict_value: Value that caused the conflict
        """
        # Kept as attributes so callers/tests can introspect the failure.
        self.conflict_field = conflict_field
        self.conflict_value = conflict_value
        # Only include the keys that were actually provided.
        errors = {
            key: value
            for key, value in (
                ("conflict_field", conflict_field),
                ("conflict_value", conflict_value),
            )
            if value is not None
        }
        super().__init__(
            status_code=status.HTTP_409_CONFLICT,
            detail=detail,
            error_type="https://lazy-bird.dev/errors/conflict",
            title="Conflict",
            errors=errors or None,
        )
class AuthenticationError(LazyBirdException):
    """Authentication error (401)."""

    def __init__(self, detail: str = "Authentication required"):
        """Initialize authentication error.

        Args:
            detail: Human-readable error message
        """
        super().__init__(
            status_code=status.HTTP_401_UNAUTHORIZED,
            detail=detail,
            error_type="https://lazy-bird.dev/errors/authentication-required",
            title="Unauthorized",
        )
        # 401 responses carry a challenge header telling the client how
        # to authenticate.
        self.headers["WWW-Authenticate"] = 'Bearer realm="api"'
class AuthorizationError(LazyBirdException):
    """Authorization error / insufficient permissions (403)."""

    def __init__(
        self,
        detail: str = "Insufficient permissions",
        required_scope: Optional[str] = None,
        user_scopes: Optional[list[str]] = None,
    ):
        """Initialize authorization error.

        Args:
            detail: Human-readable error message
            required_scope: Single required permission scope
            user_scopes: User's current scopes
        """
        # Kept as attributes so callers/tests can introspect the failure.
        self.required_scope = required_scope
        self.user_scopes = user_scopes
        # Only truthy values make it into the payload (matches the
        # original truthiness checks, not is-not-None).
        errors = {
            name: value
            for name, value in (
                ("required_scope", required_scope),
                ("user_scopes", user_scopes),
            )
            if value
        }
        super().__init__(
            status_code=status.HTTP_403_FORBIDDEN,
            detail=detail,
            error_type="https://lazy-bird.dev/errors/insufficient-permissions",
            title="Forbidden",
            errors=errors or None,
        )
class InsufficientPermissionsError(AuthorizationError):
    """Insufficient permissions (403) - alias for backward compatibility."""

    def __init__(
        self,
        detail: str,
        required_scopes: Optional[list[str]] = None,
    ):
        """Initialize permissions error.

        Args:
            detail: Human-readable error message
            required_scopes: Required permission scopes
        """
        # Legacy API accepted a list; the base class takes a single scope,
        # so forward only the first entry (None when empty/absent).
        first_scope = required_scopes[0] if required_scopes else None
        super().__init__(detail=detail, required_scope=first_scope)
class ExternalServiceError(LazyBirdException):
    """External service error (503)."""

    def __init__(self, service: str, detail: str):
        """Initialize service error.

        Args:
            service: Name of external service
            detail: Human-readable error message
        """
        # The failing service name is surfaced as an extension member.
        super().__init__(
            status_code=status.HTTP_503_SERVICE_UNAVAILABLE,
            detail=detail,
            error_type="https://lazy-bird.dev/errors/service-unavailable",
            title="External Service Error",
            errors={"service": service},
        )
# -------------------------------------------------------------------------
# Exception Handlers
# -------------------------------------------------------------------------
async def lazy_bird_exception_handler(request: Request, exc: LazyBirdException) -> JSONResponse:
    """Handle custom Lazy-Bird exceptions with RFC 7807 format.

    Args:
        request: FastAPI request
        exc: LazyBirdException instance

    Returns:
        JSONResponse: RFC 7807 formatted error response
    """
    # Get request ID from state (set by RequestIDMiddleware)
    request_id = getattr(request.state, "request_id", None)
    # Create problem details from the exception's RFC 7807 fields
    problem = ProblemDetails(
        type=exc.error_type,
        title=exc.title,
        status=exc.status_code,
        detail=exc.detail,
        instance=str(request.url.path),
        request_id=request_id,
        errors=exc.errors,
    )
    # Log error
    logger.error(
        f"{exc.title}: {exc.detail}",
        extra={
            "extra_fields": {
                "status_code": exc.status_code,
                "error_type": exc.error_type,
                "request_id": request_id,
                "path": str(request.url.path),
                "errors": exc.errors,
            }
        },
    )
    # exclude_none keeps optional members (instance, request_id, errors)
    # out of the payload when they were never set.
    return JSONResponse(
        status_code=exc.status_code,
        content=problem.model_dump(exclude_none=True),
        headers={
            "Content-Type": "application/problem+json",
            "X-Request-ID": request_id or "unknown",
        },
    )
async def http_exception_handler(request: Request, exc: HTTPException) -> JSONResponse:
    """Handle standard HTTPException with RFC 7807 format.

    Args:
        request: FastAPI request
        exc: HTTPException instance

    Returns:
        JSONResponse: RFC 7807 formatted error response
    """
    # Get request ID from state
    request_id = getattr(request.state, "request_id", None)
    # Map status code to error type; unmapped codes fall back to
    # ("about:blank", "Error") per RFC 7807.
    error_types = {
        400: ("https://lazy-bird.dev/errors/bad-request", "Bad Request"),
        401: ("https://lazy-bird.dev/errors/unauthorized", "Unauthorized"),
        403: ("https://lazy-bird.dev/errors/forbidden", "Forbidden"),
        404: ("https://lazy-bird.dev/errors/not-found", "Not Found"),
        405: ("https://lazy-bird.dev/errors/method-not-allowed", "Method Not Allowed"),
        409: ("https://lazy-bird.dev/errors/conflict", "Conflict"),
        422: ("https://lazy-bird.dev/errors/validation-error", "Validation Error"),
        429: ("https://lazy-bird.dev/errors/rate-limit-exceeded", "Rate Limit Exceeded"),
        500: ("https://lazy-bird.dev/errors/internal-server-error", "Internal Server Error"),
        503: ("https://lazy-bird.dev/errors/service-unavailable", "Service Unavailable"),
    }
    error_type, title = error_types.get(exc.status_code, ("about:blank", "Error"))
    # Create problem details
    problem = ProblemDetails(
        type=error_type,
        title=title,
        status=exc.status_code,
        detail=exc.detail,
        instance=str(request.url.path),
        request_id=request_id,
    )
    # Log error
    logger.warning(
        f"{title}: {exc.detail}",
        extra={
            "extra_fields": {
                "status_code": exc.status_code,
                "request_id": request_id,
                "path": str(request.url.path),
            }
        },
    )
    return JSONResponse(
        status_code=exc.status_code,
        content=problem.model_dump(exclude_none=True),
        headers={
            "Content-Type": "application/problem+json",
            "X-Request-ID": request_id or "unknown",
        },
    )
async def validation_exception_handler(request: Request, exc: Exception) -> JSONResponse:
    """Handle Pydantic validation errors with RFC 7807 format.

    Args:
        request: FastAPI request
        exc: Pydantic ValidationError

    Returns:
        JSONResponse: RFC 7807 formatted error response
    """
    from pydantic import ValidationError as PydanticValidationError

    # Get request ID
    request_id = getattr(request.state, "request_id", None)
    # Extract validation errors, flattening each error location
    # ("body", "name") into a dotted path ("body.name").
    # NOTE(review): FastAPI raises RequestValidationError for request-body
    # failures, which is not a Pydantic ValidationError subclass in v2 —
    # confirm those errors actually reach this handler.
    errors = {}
    if isinstance(exc, PydanticValidationError):
        for error in exc.errors():
            field = ".".join(str(loc) for loc in error["loc"])
            errors[field] = error["msg"]
    # Create problem details
    problem = ProblemDetails(
        type="https://lazy-bird.dev/errors/validation-error",
        title="Validation Error",
        status=status.HTTP_422_UNPROCESSABLE_ENTITY,
        detail="Request validation failed",
        instance=str(request.url.path),
        request_id=request_id,
        errors=errors,
    )
    # Log error
    logger.warning(
        "Validation error",
        extra={
            "extra_fields": {
                "request_id": request_id,
                "path": str(request.url.path),
                "errors": errors,
            }
        },
    )
    return JSONResponse(
        status_code=status.HTTP_422_UNPROCESSABLE_ENTITY,
        content=problem.model_dump(exclude_none=True),
        headers={
            "Content-Type": "application/problem+json",
            "X-Request-ID": request_id or "unknown",
        },
    )
def register_exception_handlers(app) -> None:
    """Register all exception handlers with FastAPI app.

    Args:
        app: FastAPI application instance

    Example:
        >>> from fastapi import FastAPI
        >>> app = FastAPI()
        >>> register_exception_handlers(app)
    """
    from pydantic import ValidationError as PydanticValidationError

    # Exception class -> handler, most specific first.
    handler_table = (
        (LazyBirdException, lazy_bird_exception_handler),
        (HTTPException, http_exception_handler),
        (PydanticValidationError, validation_exception_handler),
    )
    for exc_class, handler in handler_table:
        app.add_exception_handler(exc_class, handler)
    logger.info("Exception handlers registered")
| """Custom exceptions and error handlers for Lazy-Bird API.
This module implements RFC 7807 Problem Details for HTTP APIs.
See: https://tools.ietf.org/html/rfc7807
"""
from typing import Any, Dict, Optional
from fastapi import HTTPException, Request, status
from fastapi.responses import JSONResponse
from pydantic import BaseModel, Field
from lazy_bird.core.logging import get_logger
logger = get_logger(__name__)
# -------------------------------------------------------------------------
# RFC 7807 Problem Details Schema
# -------------------------------------------------------------------------
class ProblemDetails(BaseModel):
"""RFC 7807 Problem Details schema.
Example:
{
"type": "https://lazy-bird.dev/errors/validation-error",
"title": "Validation Error",
"status": 422,
"detail": "The 'name' field is required",
"instance": "/api/v1/projects",
"request_id": "550e8400-e29b-41d4-a716-446655440000",
"errors": {...}
}
"""
type: str = Field(
...,
description="A URI reference that identifies the problem type",
examples=["https://lazy-bird.dev/errors/validation-error"],
)
title: str = Field(
...,
description="A short, human-readable summary of the problem type",
examples=["Validation Error"],
)
status: int = Field(
| [
"# fastapi/fastapi:fastapi/exceptions.py\nHTTPException",
"# pydantic/pydantic:pydantic/main.py\nBaseModel",
"# yusufkaraaslan/lazy-bird:lazy_bird/core/logging.py\nget_logger",
"# pydantic/pydantic:pydantic/v1/fields.py\nField"
] | yusufkaraaslan/lazy-bird | lazy_bird/api/exceptions.py |
"""Dependency injection for FastAPI endpoints.
This module provides dependency functions for:
- Database session management
- API key authentication
- JWT token authentication
- Current user retrieval
"""
from typing import AsyncGenerator, Generator, Optional
from fastapi import Depends, HTTPException, Header, status
from sqlalchemy.ext.asyncio import AsyncSession
from sqlalchemy.orm import Session
from lazy_bird.core.database import AsyncSessionLocal, SessionLocal, get_async_db, get_db
from lazy_bird.core.logging import get_logger
from lazy_bird.core.security import verify_token
from lazy_bird.models.api_key import ApiKey
logger = get_logger(__name__)
# Re-export database dependencies from core
def get_database() -> Generator[Session, None, None]:
    """Provide a synchronous database session to endpoints.

    Yields:
        Session: SQLAlchemy session managed by the core ``get_db`` helper.

    Example:
        >>> from fastapi import Depends
        >>> @app.get("/")
        >>> def endpoint(db: Session = Depends(get_database)):
        >>>     # Use db session
    """
    # Delegate to the core generator so session setup/teardown stays in one place;
    # ``yield from`` preserves throw()/close() propagation into the inner generator.
    db_generator = get_db()
    yield from db_generator
async def get_async_database() -> AsyncGenerator[AsyncSession, None]:
    """Provide an asynchronous database session to endpoints.

    Yields:
        AsyncSession: SQLAlchemy async session managed by ``get_async_db``.

    Example:
        >>> from fastapi import Depends
        >>> @app.get("/")
        >>> async def endpoint(db: AsyncSession = Depends(get_async_database)):
        >>>     # Use async db session
    """
    # Re-yield every session produced by the core async generator
    # (normally exactly one per request).
    session_stream = get_async_db()
    async for db_session in session_stream:
        yield db_session
# API Key Authentication
async def get_api_key_from_header(
    x_api_key: Optional[str] = Header(None, alias="X-API-Key"),
) -> Optional[str]:
    """Read the raw API key from the ``X-API-Key`` request header.

    Args:
        x_api_key: Header value injected by FastAPI; ``None`` when the
            header is absent.

    Returns:
        Optional[str]: The raw API key, or ``None`` if no header was sent.

    Example:
        >>> curl -H "X-API-Key: lb_abc123..." http://localhost:8000/api/v1/projects
    """
    # No validation here — downstream dependencies (e.g. get_current_api_key)
    # decide whether a missing/invalid key is an error.
    return x_api_key
async def get_current_api_key(
    api_key: Optional[str] = Depends(get_api_key_from_header),
    db: AsyncSession = Depends(get_async_database),
) -> ApiKey:
    """Validate API key and return ApiKey model.

    Args:
        api_key: Raw API key from the X-API-Key header (may be None)
        db: Async database session

    Returns:
        ApiKey: Validated, active ApiKey model

    Raises:
        HTTPException: 401 if API key is missing or invalid
        HTTPException: 403 if API key is expired

    Example:
        >>> from fastapi import Depends
        >>> @app.get("/protected")
        >>> async def endpoint(api_key: ApiKey = Depends(get_current_api_key)):
        >>>     # api_key is validated and active
    """
    # Deferred imports kept function-local, grouped at the top of the body.
    from datetime import datetime

    from sqlalchemy import select

    from lazy_bird.core.security import hash_api_key

    # Reject immediately when no key was supplied.
    if not api_key:
        raise HTTPException(
            status_code=status.HTTP_401_UNAUTHORIZED,
            detail="API key required. Provide X-API-Key header.",
            headers={"WWW-Authenticate": "ApiKey"},
        )

    # Keys are stored hashed; hash the candidate before the lookup so the
    # plaintext key never reaches the database.
    candidate_hash = hash_api_key(api_key)
    lookup = select(ApiKey).where(
        ApiKey.key_hash == candidate_hash,
        ApiKey.is_active == True,  # noqa: E712
    )
    record = (await db.execute(lookup)).scalar_one_or_none()

    # No matching active key -> treat as invalid credentials.
    if record is None:
        # Log only a short prefix of the supplied key, never the full value.
        logger.warning(f"Invalid API key attempt: {api_key[:8]}...")
        raise HTTPException(
            status_code=status.HTTP_401_UNAUTHORIZED,
            detail="Invalid API key",
            headers={"WWW-Authenticate": "ApiKey"},
        )

    # Expiry check (expires_at is optional; NULL means the key never expires).
    # NOTE(review): comparison assumes expires_at is stored as naive UTC to
    # match datetime.utcnow() — confirm against the ApiKey column definition.
    if record.expires_at and record.expires_at < datetime.utcnow():
        logger.warning(f"Expired API key used: {record.key_prefix}")
        raise HTTPException(
            status_code=status.HTTP_403_FORBIDDEN,
            detail="API key has expired",
        )

    # Record usage for auditing / key-rotation decisions.
    record.last_used_at = datetime.utcnow()
    await db.commit()

    logger.info(
        f"API key authenticated: {record.key_prefix}",
        extra={"extra_fields": {"api_key_id": str(record.id)}},
    )
    return record
# JWT Token Authentication
async def get_token_from_header(
    authorization: Optional[str] = Header(None, alias="Authorization"),
) -> Optional[str]:
    """Extract a bearer JWT from the Authorization header.

    Args:
        authorization: Raw Authorization header value, or None if absent.

    Returns:
        Optional[str]: The token when the header is exactly ``Bearer <token>``
        (scheme matched case-insensitively); ``None`` otherwise.

    Example:
        >>> curl -H "Authorization: Bearer eyJ..." http://localhost:8000/api/v1/projects
    """
    if not authorization:
        return None
    # Accept only the two-part "Bearer <token>" shape; anything else is ignored.
    pieces = authorization.split()
    if len(pieces) == 2 and pieces[0].lower() == "bearer":
        return pieces[1]
    return None
async def get_current_user(
    token: Optional[str] = Depends(get_token_from_header),
) -> dict:
    """Validate JWT token and return the decoded claims.

    Args:
        token: JWT token extracted from the Authorization header

    Returns:
        dict: Token claims, e.g. {"sub": "user_id", "email": ..., ...}

    Raises:
        HTTPException: 401 if token is missing, invalid, or lacks "sub"

    Example:
        >>> from fastapi import Depends
        >>> @app.get("/me")
        >>> async def endpoint(user: dict = Depends(get_current_user)):
        >>>     # user contains: {"sub": "user_id", "email": "user@example.com", ...}
    """
    # Guard: no token at all.
    if not token:
        raise HTTPException(
            status_code=status.HTTP_401_UNAUTHORIZED,
            detail="Authentication required. Provide Authorization: Bearer <token> header.",
            headers={"WWW-Authenticate": "Bearer"},
        )

    # Guard: signature/expiry verification failed (verify_token returns falsy).
    claims = verify_token(token)
    if not claims:
        logger.warning("Invalid JWT token attempt")
        raise HTTPException(
            status_code=status.HTTP_401_UNAUTHORIZED,
            detail="Invalid or expired token",
            headers={"WWW-Authenticate": "Bearer"},
        )

    # Guard: a verified token must still carry the subject claim.
    if "sub" not in claims:
        logger.error("JWT token missing 'sub' claim")
        raise HTTPException(
            status_code=status.HTTP_401_UNAUTHORIZED,
            detail="Invalid token: missing user identifier",
            headers={"WWW-Authenticate": "Bearer"},
        )

    logger.info(
        f"JWT authenticated: {claims.get('sub')}",
        extra={"extra_fields": {"user_id": claims.get("sub")}},
    )
    return claims
# Optional Authentication (doesn't raise exception if missing)
async def get_optional_api_key(
    api_key: Optional[str] = Depends(get_api_key_from_header),
    db: AsyncSession = Depends(get_async_database),
) -> Optional[ApiKey]:
    """Optional API key validation (never raises for missing/invalid keys).

    Args:
        api_key: Raw API key from the header, if any
        db: Async database session

    Returns:
        Optional[ApiKey]: Validated ApiKey model, or None when the key is
        absent or fails validation.

    Example:
        >>> from fastapi import Depends
        >>> @app.get("/public-or-private")
        >>> async def endpoint(api_key: Optional[ApiKey] = Depends(get_optional_api_key)):
        >>>     if api_key:
        >>>         # Authenticated user
        >>>     else:
        >>>         # Public access
    """
    validated: Optional[ApiKey] = None
    if api_key:
        try:
            validated = await get_current_api_key(api_key, db)
        except HTTPException:
            # Invalid/expired key degrades to anonymous access instead of 4xx.
            validated = None
    return validated
async def get_optional_user(
    token: Optional[str] = Depends(get_token_from_header),
) -> Optional[dict]:
    """Optional JWT validation (never raises for missing/invalid tokens).

    Args:
        token: JWT token from the Authorization header, if any

    Returns:
        Optional[dict]: Token claims, or None when the token is absent or
        fails validation.

    Example:
        >>> from fastapi import Depends
        >>> @app.get("/public-or-private")
        >>> async def endpoint(user: Optional[dict] = Depends(get_optional_user)):
        >>>     if user:
        >>>         # Authenticated user
        >>>     else:
        >>>         # Public access
    """
    user_payload: Optional[dict] = None
    if token:
        try:
            user_payload = await get_current_user(token)
        except HTTPException:
            # Invalid token degrades to anonymous access instead of 401.
            user_payload = None
    return user_payload
# Permission Checking Dependencies
class RequireScopes:
    """Dependency to check API key has required scopes.

    Any single matching scope is sufficient (OR semantics).

    Example:
        >>> from fastapi import Depends
        >>> @app.delete("/projects/{id}")
        >>> async def delete_project(
        >>>     api_key: ApiKey = Depends(RequireScopes(["write", "admin"]))
        >>> ):
        >>>     # api_key has either "write" OR "admin" scope
    """

    def __init__(self, required_scopes: list[str]):
        """Initialize scope checker.

        Args:
            required_scopes: List of acceptable scopes (any match is sufficient)
        """
        self.required_scopes = required_scopes
        # Precompute once: __call__ runs on every request, so avoid rebuilding
        # the set per call. Kept private; the public list attribute is unchanged.
        self._required_scopes_set = frozenset(required_scopes)

    async def __call__(
        self,
        api_key: ApiKey = Depends(get_current_api_key),
    ) -> ApiKey:
        """Check if API key has required scopes.

        Args:
            api_key: Validated API key

        Returns:
            ApiKey: API key model if scopes match

        Raises:
            HTTPException: 403 if API key lacks all required scopes
        """
        # intersection() accepts any iterable, so api_key.scopes needs no copy.
        if not self._required_scopes_set.intersection(api_key.scopes):
            logger.warning(
                f"Insufficient permissions for API key {api_key.key_prefix}. "
                f"Required: {self.required_scopes}, Has: {api_key.scopes}"
            )
            raise HTTPException(
                status_code=status.HTTP_403_FORBIDDEN,
                detail=f"Insufficient permissions. Required scopes: {self.required_scopes}",
            )
        return api_key
# Convenience: Common permission dependencies
# Scope checks use OR semantics: a key passes when it holds ANY listed scope,
# so higher-privilege keys (e.g. "admin") also satisfy lower-privilege checks.
RequireRead = RequireScopes(["read", "write", "admin"])
RequireWrite = RequireScopes(["write", "admin"])
RequireAdmin = RequireScopes(["admin"])
| """Dependency injection for FastAPI endpoints.
This module provides dependency functions for:
- Database session management
- API key authentication
- JWT token authentication
- Current user retrieval
"""
from typing import AsyncGenerator, Generator, Optional
from fastapi import Depends, HTTPException, Header, status
from sqlalchemy.ext.asyncio import AsyncSession
from sqlalchemy.orm import Session
from lazy_bird.core.database import AsyncSessionLocal, SessionLocal, get_async_db, get_db
from lazy_bird.core.logging import get_logger
from lazy_bird.core.security import verify_token
from lazy_bird.models.api_key import ApiKey
logger = get_logger(__name__)
# Re-export database dependencies from core
def get_database() -> Generator[Session, None, None]:
"""Get synchronous database session.
Yields:
Session: SQLAlchemy session
Example:
>>> from fastapi import Depends
>>> @app.get("/")
>>> def endpoint(db: Session = Depends(get_database)):
>>> # Use db session
"""
yield from get_db()
async def get_async_database() -> AsyncGenerator[AsyncSession, None]:
"""Get asynchronous database session.
Yields:
AsyncSession: SQLAlchemy async session
Example:
>>> from fastapi import Depends
>>> @app.get("/")
>>> async def endpoint(db: AsyncSession = Depends(get_async_database)):
>>> # Use async db session
"""
async for session in get_async_db():
yield session
# API Key Authentication
| [
"# fastapi/fastapi:fastapi/param_functions.py\nDepends",
"# sqlalchemy/sqlalchemy:lib/sqlalchemy/ext/asyncio/session.py\nAsyncSession",
"# sqlalchemy/sqlalchemy:lib/sqlalchemy/orm/session.py\nSession",
"# yusufkaraaslan/lazy-bird:lazy_bird/core/logging.py\nget_logger",
"# yusufkaraaslan/lazy-bird:lazy_bird/... | yusufkaraaslan/lazy-bird | lazy_bird/api/dependencies.py |
"""API layer for Lazy-Bird.
This package contains the FastAPI application and related components:
- middleware: Request/response middleware (CORS, logging, error handling)
- routers: API route handlers
- dependencies: FastAPI dependency injection functions
- exceptions: Custom exceptions and error handlers (RFC 7807)
"""
from lazy_bird.api.dependencies import (
RequireAdmin,
RequireRead,
RequireScopes,
RequireWrite,
get_async_database,
get_current_api_key,
get_current_user,
get_database,
get_optional_api_key,
get_optional_user,
)
from lazy_bird.api.exceptions import (
ExternalServiceError,
InsufficientPermissionsError,
LazyBirdException,
ProblemDetails,
ResourceConflictError,
ResourceNotFoundError,
ValidationError,
register_exception_handlers,
)
from lazy_bird.api.middleware import (
ErrorHandlingMiddleware,
RateLimitMiddleware,
RequestIDMiddleware,
RequestLoggingMiddleware,
SecurityHeadersMiddleware,
setup_cors,
setup_middleware,
)
__all__ = [
# Middleware classes
"RequestIDMiddleware",
"RequestLoggingMiddleware",
"ErrorHandlingMiddleware",
"SecurityHeadersMiddleware",
"RateLimitMiddleware",
# Setup functions
"setup_cors",
"setup_middleware",
# Dependencies - Database
"get_database",
"get_async_database",
# Dependencies - Authentication
"get_current_api_key",
"get_current_user",
"get_optional_api_key",
"get_optional_user",
# Dependencies - Permissions
"RequireScopes",
"RequireRead",
"RequireWrite",
"RequireAdmin",
# Exceptions - Custom Exception Classes
"LazyBirdException",
"ValidationError",
"ResourceNotFoundError",
"ResourceConflictError",
"InsufficientPermissionsError",
"ExternalServiceError",
# Exceptions - Schemas and Handlers
"ProblemDetails",
"register_exception_handlers",
]
| """API layer for Lazy-Bird.
This package contains the FastAPI application and related components:
- middleware: Request/response middleware (CORS, logging, error handling)
- routers: API route handlers
- dependencies: FastAPI dependency injection functions
- exceptions: Custom exceptions and error handlers (RFC 7807)
"""
from lazy_bird.api.dependencies import (
RequireAdmin,
RequireRead,
RequireScopes,
RequireWrite,
get_async_database,
get_current_api_key,
get_current_user,
get_database,
get_optional_api_key,
get_optional_user,
)
from lazy_bird.api.exceptions import (
ExternalServiceError,
InsufficientPermissionsError,
LazyBirdException,
ProblemDetails,
ResourceConflictError,
ResourceNotFoundError,
ValidationError,
register_exception_handlers,
)
from lazy_bird.api.middleware import (
ErrorHandlingMiddleware,
RateLimitMiddleware,
RequestIDMiddleware,
RequestLoggingMiddleware,
SecurityHeadersMiddleware,
setup_cors,
setup_middleware,
)
__all__ = [
# Middleware classes
"RequestIDMiddleware",
"RequestLoggingMiddleware",
"ErrorHandlingMiddleware",
"SecurityHeadersMiddleware",
"RateLimitMiddleware",
# Setup functions
"setup_cors",
"setup_middleware",
# Dependencies - Database
"get_database",
"get_async_database",
# Dependencies - Authentication
"get_current_api_key",
"get_current_user",
| [
"# yusufkaraaslan/lazy-bird:lazy_bird/api/exceptions.py\nExternalServiceError",
"# yusufkaraaslan/lazy-bird:lazy_bird/api/middleware.py\nErrorHandlingMiddleware"
] | yusufkaraaslan/lazy-bird | lazy_bird/api/__init__.py |
"""
Lazy_Bird - Automate development projects with Claude Code
A progressive automation system that enables Claude Code instances to work
on software development tasks autonomously. Supports 15+ frameworks including
Godot, Unity, Python, Rust, React, Django, and more.
"""
# Package metadata consumed by tooling and exposed via __all__ below.
__version__ = "0.1.0"
__author__ = "Yusuf Karaaslan"
__license__ = "MIT"
from pathlib import Path
# Package root directory
# NOTE(review): __file__ is lazy_bird/__init__.py, so .parent.parent resolves to
# the directory ABOVE the package (the repository root) — confirm the name
# "PACKAGE_ROOT" reflects the intended level.
PACKAGE_ROOT = Path(__file__).parent.parent
# Common directories
# Repo-level resource locations, all resolved relative to PACKAGE_ROOT above.
SCRIPTS_DIR = PACKAGE_ROOT / "scripts"
CONFIG_DIR = PACKAGE_ROOT / "config"
WEB_DIR = PACKAGE_ROOT / "web"
DOCS_DIR = PACKAGE_ROOT / "Docs"
# Explicit public API of this module.
__all__ = [
    "__version__",
    "__author__",
    "__license__",
    "PACKAGE_ROOT",
    "SCRIPTS_DIR",
    "CONFIG_DIR",
    "WEB_DIR",
    "DOCS_DIR",
]
| """
Lazy_Bird - Automate development projects with Claude Code
A progressive automation system that enables Claude Code instances to work
on software development tasks autonomously. Supports 15+ frameworks including
Godot, Unity, Python, Rust, React, Django, and more.
"""
__version__ = "0.1.0"
__author__ = "Yusuf Karaaslan"
__license__ = "MIT"
from pathlib import Path
# Package root directory
PACKAGE_ROOT = Path(__file__).parent.parent
# Common directories
SCRIPTS_DIR = PACKAGE_ROOT / "scripts"
CONFIG_DIR = PACKAGE_ROOT / "config"
WEB_DIR = PACKAGE_ROOT / "web"
DOCS_DIR = PACKAGE_ROOT / "Docs"
__all__ = [
"__version__",
"__author__",
"__license__",
"PACKAGE_ROOT",
"SCRIPTS_DIR",
"CONFIG_DIR",
"WEB_DIR",
"DOCS_DIR",
]
| [] | yusufkaraaslan/lazy-bird | lazy_bird/__init__.py |
"""Initial v2.0 schema
Revision ID: eaf8e2359e19
Revises:
Create Date: 2026-03-25 00:00:00.000000
Creates all 8 tables for Lazy-Bird v2.0:
1. framework_presets - Framework-specific command presets
2. claude_accounts - Claude API credentials and settings
3. projects - Project configurations and settings
4. task_runs - Task execution records
5. task_run_logs - Detailed task execution logs
6. webhook_subscriptions - Webhook endpoint registrations
7. daily_usage - Daily usage tracking and billing
8. api_keys - API authentication tokens
"""
from typing import Sequence, Union
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
from alembic import op
# revision identifiers, used by Alembic.
revision: str = "eaf8e2359e19"
down_revision: Union[str, None] = None
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
# =========================================================================
# 1. framework_presets (no FKs, referenced by projects)
# =========================================================================
op.create_table(
"framework_presets",
sa.Column(
"id",
postgresql.UUID(as_uuid=True),
server_default=sa.text("gen_random_uuid()"),
nullable=False,
),
sa.Column(
"name",
sa.String(100),
nullable=False,
comment="Internal preset name (lowercase, unique)",
),
sa.Column(
"display_name", sa.String(255), nullable=False, comment="Human-readable display name"
),
sa.Column("description", sa.Text(), nullable=True, comment="Preset description"),
sa.Column(
"framework_type",
sa.String(50),
nullable=False,
comment="Framework category: game_engine, backend, frontend, language",
),
sa.Column(
"language",
sa.String(50),
nullable=True,
comment="Programming language: gdscript, python, javascript, rust, etc.",
),
sa.Column(
"test_command",
sa.String(500),
nullable=False,
comment="Command to run tests (required)",
),
sa.Column(
"build_command",
sa.String(500),
nullable=True,
comment="Command to build project (optional)",
),
sa.Column(
"lint_command", sa.String(500), nullable=True, comment="Command to lint code (optional)"
),
sa.Column(
"format_command",
sa.String(500),
nullable=True,
comment="Command to format code (optional)",
),
sa.Column(
"config_files",
postgresql.JSONB(astext_type=sa.Text()),
nullable=True,
comment="JSON object with framework-specific config file paths",
),
sa.Column(
"is_builtin",
sa.Boolean(),
server_default="false",
nullable=False,
comment="Built-in preset (cannot be deleted)",
),
sa.Column(
"created_at",
sa.DateTime(timezone=True),
server_default=sa.text("CURRENT_TIMESTAMP"),
nullable=False,
comment="Creation timestamp",
),
sa.Column(
"updated_at",
sa.DateTime(timezone=True),
server_default=sa.text("CURRENT_TIMESTAMP"),
nullable=False,
comment="Last update timestamp",
),
sa.PrimaryKeyConstraint("id"),
sa.UniqueConstraint("name"),
comment="Framework-specific command presets and configurations",
)
op.create_index("ix_framework_presets_framework_type", "framework_presets", ["framework_type"])
op.create_index("ix_framework_presets_is_builtin", "framework_presets", ["is_builtin"])
# =========================================================================
# 2. claude_accounts (no FKs, referenced by projects & task_runs)
# =========================================================================
op.create_table(
"claude_accounts",
sa.Column(
"id",
postgresql.UUID(as_uuid=True),
server_default=sa.text("gen_random_uuid()"),
nullable=False,
),
sa.Column("name", sa.String(255), nullable=False, comment="Human-readable account name"),
sa.Column(
"account_type",
sa.String(50),
nullable=False,
comment="Account type: api or subscription",
),
sa.Column(
"api_key",
sa.String(500),
nullable=True,
comment="Anthropic API key (encrypted at application layer)",
),
sa.Column(
"config_directory",
sa.String(500),
nullable=True,
comment="Config directory path for subscription mode",
),
sa.Column(
"session_token",
sa.String(500),
nullable=True,
comment="Session token for subscription mode (encrypted at application layer)",
),
sa.Column(
"model",
sa.String(100),
server_default="claude-sonnet-4-5",
nullable=False,
comment="Claude model identifier",
),
sa.Column(
"max_tokens",
sa.Integer(),
server_default="8000",
nullable=False,
comment="Maximum tokens per request",
),
sa.Column(
"temperature",
sa.Numeric(3, 2),
server_default="0.7",
nullable=False,
comment="Model temperature (0.00-1.00)",
),
sa.Column(
"monthly_budget_usd",
sa.Numeric(10, 2),
nullable=True,
comment="Monthly spending limit in USD",
),
sa.Column(
"is_active",
sa.Boolean(),
server_default="true",
nullable=False,
comment="Whether account is active and can be used",
),
sa.Column(
"created_at",
sa.DateTime(timezone=True),
server_default=sa.text("CURRENT_TIMESTAMP"),
nullable=False,
comment="Creation timestamp",
),
sa.Column(
"updated_at",
sa.DateTime(timezone=True),
server_default=sa.text("CURRENT_TIMESTAMP"),
nullable=False,
comment="Last update timestamp",
),
sa.Column(
"last_used_at",
sa.DateTime(timezone=True),
nullable=True,
comment="Last time this account was used for a task",
),
sa.PrimaryKeyConstraint("id"),
sa.CheckConstraint("account_type IN ('api', 'subscription')", name="check_account_type"),
sa.CheckConstraint(
"(account_type = 'api' AND api_key IS NOT NULL) OR "
"(account_type = 'subscription' AND config_directory IS NOT NULL)",
name="check_api_key_required",
),
sa.CheckConstraint(
"temperature >= 0.0 AND temperature <= 1.0", name="check_temperature_range"
),
sa.CheckConstraint("max_tokens > 0", name="check_max_tokens_positive"),
sa.CheckConstraint(
"monthly_budget_usd IS NULL OR monthly_budget_usd > 0",
name="check_monthly_budget_positive",
),
comment="Claude API credentials and configuration",
)
op.create_index("ix_claude_accounts_account_type", "claude_accounts", ["account_type"])
op.create_index("ix_claude_accounts_is_active", "claude_accounts", ["is_active"])
# =========================================================================
# 3. projects (FKs to framework_presets, claude_accounts)
# =========================================================================
op.create_table(
"projects",
sa.Column(
"id",
postgresql.UUID(as_uuid=True),
server_default=sa.text("gen_random_uuid()"),
nullable=False,
),
sa.Column("name", sa.String(255), nullable=False, comment="Human-readable project name"),
sa.Column("slug", sa.String(100), nullable=False, comment="URL-safe unique identifier"),
sa.Column(
"repo_url",
sa.String(500),
nullable=False,
comment="Git repository URL (GitHub, GitLab, etc.)",
),
sa.Column(
"default_branch",
sa.String(100),
server_default="main",
nullable=False,
comment="Default git branch for task execution",
),
sa.Column(
"framework_preset_id",
postgresql.UUID(as_uuid=True),
sa.ForeignKey("framework_presets.id", ondelete="SET NULL"),
nullable=True,
comment="Reference to framework preset (optional)",
),
sa.Column(
"project_type",
sa.String(50),
nullable=False,
comment="Project type: python, nodejs, rust, godot, etc.",
),
sa.Column(
"test_command",
sa.String(500),
nullable=True,
comment="Custom test command (overrides preset)",
),
sa.Column(
"build_command",
sa.String(500),
nullable=True,
comment="Custom build command (overrides preset)",
),
sa.Column(
"lint_command",
sa.String(500),
nullable=True,
comment="Custom lint command (overrides preset)",
),
sa.Column(
"format_command",
sa.String(500),
nullable=True,
comment="Custom format command (overrides preset)",
),
sa.Column(
"automation_enabled",
sa.Boolean(),
server_default="false",
nullable=False,
comment="Whether automation is active for this project",
),
sa.Column(
"ready_state_name",
sa.String(100),
nullable=True,
comment="State name for ready tasks (e.g., 'Ready', 'To Do')",
),
sa.Column(
"in_progress_state_name",
sa.String(100),
server_default="In Progress",
nullable=False,
comment="State name for running tasks",
),
sa.Column(
"review_state_name",
sa.String(100),
server_default="In Review",
nullable=False,
comment="State name for tasks in review",
),
sa.Column(
"done_state_name",
sa.String(100),
server_default="Done",
nullable=False,
comment="State name for completed tasks",
),
sa.Column(
"max_concurrent_tasks",
sa.Integer(),
server_default="3",
nullable=False,
comment="Maximum number of parallel task executions",
),
sa.Column(
"task_timeout_seconds",
sa.Integer(),
server_default="1800",
nullable=False,
comment="Task execution timeout in seconds",
),
sa.Column(
"max_cost_per_task_usd",
sa.Numeric(10, 2),
server_default="5.00",
nullable=False,
comment="Maximum cost per task in USD",
),
sa.Column(
"daily_cost_limit_usd",
sa.Numeric(10, 2),
server_default="50.00",
nullable=False,
comment="Daily total cost limit in USD",
),
sa.Column(
"github_installation_id",
sa.BigInteger(),
nullable=True,
comment="GitHub App installation ID for this project",
),
sa.Column(
"gitlab_project_id",
sa.BigInteger(),
nullable=True,
comment="GitLab project ID for this project",
),
sa.Column(
"source_platform",
sa.String(50),
nullable=True,
comment="Source platform: github, gitlab, plane, etc.",
),
sa.Column(
"source_platform_url",
sa.String(500),
nullable=True,
comment="Platform URL for web UI integration",
),
sa.Column(
"claude_account_id",
postgresql.UUID(as_uuid=True),
sa.ForeignKey("claude_accounts.id", ondelete="SET NULL"),
nullable=True,
comment="Reference to Claude API account",
),
sa.Column(
"created_at",
sa.DateTime(timezone=True),
server_default=sa.text("CURRENT_TIMESTAMP"),
nullable=False,
comment="Creation timestamp",
),
sa.Column(
"updated_at",
sa.DateTime(timezone=True),
server_default=sa.text("CURRENT_TIMESTAMP"),
nullable=False,
comment="Last update timestamp",
),
sa.Column(
"deleted_at",
sa.DateTime(timezone=True),
nullable=True,
comment="Soft delete timestamp (NULL if active)",
),
sa.Column(
"search_vector",
postgresql.TSVECTOR(),
nullable=True,
comment="Full-text search vector (auto-generated from name + repo_url)",
),
sa.PrimaryKeyConstraint("id"),
sa.UniqueConstraint("slug"),
comment="Project configurations and automation settings",
)
op.create_index("ix_projects_slug", "projects", ["slug"])
op.create_index("ix_projects_automation_enabled", "projects", ["automation_enabled"])
op.create_index("ix_projects_source_platform", "projects", ["source_platform"])
op.create_index(
"idx_projects_search",
"projects",
["search_vector"],
postgresql_using="gin",
)
op.create_index(
"idx_projects_deleted_at",
"projects",
["deleted_at"],
postgresql_where=sa.text("deleted_at IS NULL"),
)
# =========================================================================
# 4. task_runs (FKs to projects, claude_accounts)
# =========================================================================
op.create_table(
"task_runs",
sa.Column(
"id",
postgresql.UUID(as_uuid=True),
server_default=sa.text("gen_random_uuid()"),
nullable=False,
),
sa.Column(
"project_id",
postgresql.UUID(as_uuid=True),
sa.ForeignKey("projects.id", ondelete="CASCADE"),
nullable=False,
comment="Reference to project (cascade delete)",
),
sa.Column(
"claude_account_id",
postgresql.UUID(as_uuid=True),
sa.ForeignKey("claude_accounts.id", ondelete="SET NULL"),
nullable=True,
comment="Reference to Claude account used for execution",
),
sa.Column(
"work_item_id",
sa.String(255),
nullable=False,
comment="External work item ID (e.g., 'issue-42', 'JIRA-123')",
),
sa.Column(
"work_item_url", sa.String(500), nullable=True, comment="URL to work item on platform"
),
sa.Column("work_item_title", sa.String(500), nullable=True, comment="Work item title"),
sa.Column(
"work_item_description", sa.Text(), nullable=True, comment="Work item description/body"
),
sa.Column(
"task_type",
sa.String(50),
server_default="feature",
nullable=False,
comment="Task type: feature, bugfix, refactor, docs, etc.",
),
sa.Column(
"complexity",
sa.String(20),
nullable=True,
comment="Task complexity: simple, medium, complex",
),
sa.Column(
"prompt", sa.Text(), nullable=False, comment="Prompt sent to Claude for task execution"
),
sa.Column(
"status",
sa.String(50),
server_default="queued",
nullable=False,
comment="Execution status: queued, running, success, failed, cancelled, timeout",
),
sa.Column(
"started_at",
sa.DateTime(timezone=True),
nullable=True,
comment="When task execution started",
),
sa.Column(
"completed_at",
sa.DateTime(timezone=True),
nullable=True,
comment="When task execution completed",
),
sa.Column(
"duration_seconds",
sa.Integer(),
nullable=True,
comment="Total execution time in seconds (auto-calculated)",
),
sa.Column(
"retry_count",
sa.Integer(),
server_default="0",
nullable=False,
comment="Number of retries attempted",
),
sa.Column(
"max_retries",
sa.Integer(),
server_default="3",
nullable=False,
comment="Maximum retries allowed",
),
sa.Column(
"branch_name",
sa.String(255),
nullable=True,
comment="Git branch name created for this task",
),
sa.Column("worktree_path", sa.String(500), nullable=True, comment="Path to git worktree"),
sa.Column("commit_sha", sa.String(40), nullable=True, comment="Git commit SHA"),
sa.Column("pr_url", sa.String(500), nullable=True, comment="URL to created pull request"),
sa.Column(
"pr_number", sa.Integer(), nullable=True, comment="Pull request number on platform"
),
sa.Column(
"tests_passed",
sa.Boolean(),
nullable=True,
comment="Whether tests passed (NULL if not run)",
),
sa.Column("test_output", sa.Text(), nullable=True, comment="Test execution output"),
sa.Column(
"error_message", sa.Text(), nullable=True, comment="Error message if task failed"
),
sa.Column(
"tokens_used", sa.Integer(), nullable=True, comment="Total tokens consumed by Claude"
),
sa.Column("cost_usd", sa.Numeric(10, 4), nullable=True, comment="Total cost in USD"),
sa.Column(
"task_metadata",
postgresql.JSONB(astext_type=sa.Text()),
server_default="{}",
nullable=True,
comment="Additional task metadata as JSON",
),
sa.Column(
"created_at",
sa.DateTime(timezone=True),
server_default=sa.text("CURRENT_TIMESTAMP"),
nullable=False,
comment="Creation timestamp",
),
sa.Column(
"updated_at",
sa.DateTime(timezone=True),
server_default=sa.text("CURRENT_TIMESTAMP"),
nullable=False,
comment="Last update timestamp",
),
sa.PrimaryKeyConstraint("id"),
sa.CheckConstraint(
"status IN ('queued', 'running', 'success', 'failed', 'cancelled', 'timeout')",
name="check_status",
),
sa.CheckConstraint(
"complexity IS NULL OR complexity IN ('simple', 'medium', 'complex')",
name="check_complexity",
),
comment="Task execution records and results",
)
op.create_index("ix_task_runs_project_id", "task_runs", ["project_id"])
op.create_index("ix_task_runs_work_item_id", "task_runs", ["work_item_id"])
op.create_index("ix_task_runs_status", "task_runs", ["status"])
op.create_index("idx_task_runs_project_status", "task_runs", ["project_id", "status"])
op.create_index("idx_task_runs_created_at", "task_runs", [sa.text("created_at DESC")])
# =========================================================================
# 5. task_run_logs (FK to task_runs)
# =========================================================================
op.create_table(
"task_run_logs",
sa.Column(
"id",
postgresql.UUID(as_uuid=True),
server_default=sa.text("gen_random_uuid()"),
nullable=False,
),
sa.Column(
"task_run_id",
postgresql.UUID(as_uuid=True),
sa.ForeignKey("task_runs.id", ondelete="CASCADE"),
nullable=False,
comment="Reference to task run (cascade delete)",
),
sa.Column(
"level",
sa.String(20),
nullable=False,
comment="Log level: debug, info, warning, error, critical",
),
sa.Column("message", sa.Text(), nullable=False, comment="Log message text"),
sa.Column(
"step",
sa.String(100),
nullable=True,
comment="Execution step: init, planning, implementation, testing, etc.",
),
sa.Column(
"tool_name",
sa.String(50),
nullable=True,
comment="Claude tool name: Read, Write, Edit, Bash, Grep, etc.",
),
sa.Column(
"log_metadata",
postgresql.JSONB(astext_type=sa.Text()),
server_default="{}",
nullable=True,
comment="Additional log metadata as JSON",
),
sa.Column(
"created_at",
sa.DateTime(timezone=True),
server_default=sa.text("CURRENT_TIMESTAMP"),
nullable=False,
comment="Log timestamp",
),
sa.PrimaryKeyConstraint("id"),
sa.CheckConstraint(
"level IN ('debug', 'info', 'warning', 'error', 'critical')",
name="check_level",
),
comment="Detailed task execution logs",
)
op.create_index("ix_task_run_logs_task_run_id", "task_run_logs", ["task_run_id"])
op.create_index("ix_task_run_logs_level", "task_run_logs", ["level"])
op.create_index("ix_task_run_logs_created_at", "task_run_logs", ["created_at"])
op.create_index(
"idx_task_run_logs_task_run_created", "task_run_logs", ["task_run_id", "created_at"]
)
# =========================================================================
# 6. webhook_subscriptions (FK to projects)
# =========================================================================
op.create_table(
"webhook_subscriptions",
sa.Column(
"id",
postgresql.UUID(as_uuid=True),
server_default=sa.text("gen_random_uuid()"),
nullable=False,
),
sa.Column("url", sa.String(500), nullable=False, comment="Webhook endpoint URL"),
sa.Column(
"secret",
sa.String(255),
nullable=False,
comment="Secret for HMAC signature verification",
),
sa.Column(
"project_id",
postgresql.UUID(as_uuid=True),
sa.ForeignKey("projects.id", ondelete="CASCADE"),
nullable=True,
comment="Project ID (NULL for global subscriptions)",
),
sa.Column(
"events",
postgresql.ARRAY(sa.Text()),
nullable=False,
comment="Array of event types",
),
sa.Column(
"is_active",
sa.Boolean(),
server_default="true",
nullable=False,
comment="Whether subscription is active",
),
sa.Column(
"last_triggered_at",
sa.DateTime(timezone=True),
nullable=True,
comment="Last webhook delivery time",
),
sa.Column(
"failure_count",
sa.Integer(),
server_default="0",
nullable=False,
comment="Number of consecutive failures",
),
sa.Column(
"last_failure_at",
sa.DateTime(timezone=True),
nullable=True,
comment="Last failure timestamp",
),
sa.Column("description", sa.Text(), nullable=True, comment="Subscription description"),
sa.Column(
"created_at",
sa.DateTime(timezone=True),
server_default=sa.text("CURRENT_TIMESTAMP"),
nullable=False,
),
sa.Column(
"updated_at",
sa.DateTime(timezone=True),
server_default=sa.text("CURRENT_TIMESTAMP"),
nullable=False,
),
sa.PrimaryKeyConstraint("id"),
sa.CheckConstraint("url ~ '^https?://'", name="check_url_format"),
comment="Client webhook endpoint registrations",
)
op.create_index("ix_webhook_subscriptions_project_id", "webhook_subscriptions", ["project_id"])
op.create_index("ix_webhook_subscriptions_is_active", "webhook_subscriptions", ["is_active"])
op.create_index(
"idx_webhook_subscriptions_events",
"webhook_subscriptions",
["events"],
postgresql_using="gin",
)
# =========================================================================
# 7. daily_usage (FK to projects)
# =========================================================================
op.create_table(
"daily_usage",
sa.Column(
"id",
postgresql.UUID(as_uuid=True),
server_default=sa.text("gen_random_uuid()"),
nullable=False,
),
sa.Column(
"project_id",
postgresql.UUID(as_uuid=True),
sa.ForeignKey("projects.id", ondelete="CASCADE"),
nullable=False,
),
sa.Column("date", sa.Date(), nullable=False),
sa.Column("tasks_queued", sa.Integer(), server_default="0", nullable=False),
sa.Column("tasks_completed", sa.Integer(), server_default="0", nullable=False),
sa.Column("tasks_failed", sa.Integer(), server_default="0", nullable=False),
sa.Column("total_tokens_used", sa.BigInteger(), server_default="0", nullable=False),
sa.Column("total_cost_usd", sa.Numeric(10, 4), server_default="0", nullable=False),
sa.Column("total_duration_seconds", sa.BigInteger(), server_default="0", nullable=False),
sa.Column(
"created_at",
sa.DateTime(timezone=True),
server_default=sa.text("CURRENT_TIMESTAMP"),
nullable=False,
),
sa.Column(
"updated_at",
sa.DateTime(timezone=True),
server_default=sa.text("CURRENT_TIMESTAMP"),
nullable=False,
),
sa.PrimaryKeyConstraint("id"),
sa.UniqueConstraint("project_id", "date", name="uq_daily_usage_project_date"),
comment="Daily usage tracking and billing",
)
op.create_index(
"idx_daily_usage_project_date", "daily_usage", ["project_id", sa.text("date DESC")]
)
op.create_index("idx_daily_usage_date", "daily_usage", [sa.text("date DESC")])
# =========================================================================
# 8. api_keys (FK to projects)
# =========================================================================
op.create_table(
"api_keys",
sa.Column(
"id",
postgresql.UUID(as_uuid=True),
server_default=sa.text("gen_random_uuid()"),
nullable=False,
),
sa.Column(
"key_hash",
sa.String(64),
nullable=False,
comment="SHA-256 hash of actual key",
),
sa.Column(
"key_prefix",
sa.String(10),
nullable=False,
comment="First 8 chars for identification",
),
sa.Column("name", sa.String(255), nullable=False),
sa.Column(
"project_id",
postgresql.UUID(as_uuid=True),
sa.ForeignKey("projects.id", ondelete="CASCADE"),
nullable=True,
comment="Project ID (NULL for organization-level)",
),
sa.Column(
"scopes",
postgresql.ARRAY(sa.String()),
server_default=sa.text("'{read}'"),
nullable=False,
),
sa.Column("is_active", sa.Boolean(), server_default="true", nullable=False),
sa.Column("expires_at", sa.DateTime(timezone=True), nullable=True),
sa.Column("last_used_at", sa.DateTime(timezone=True), nullable=True),
sa.Column("created_by", sa.String(255), nullable=True),
sa.Column(
"created_at",
sa.DateTime(timezone=True),
server_default=sa.text("CURRENT_TIMESTAMP"),
nullable=False,
),
sa.Column("revoked_at", sa.DateTime(timezone=True), nullable=True),
sa.PrimaryKeyConstraint("id"),
sa.UniqueConstraint("key_hash"),
sa.CheckConstraint(
"scopes <@ ARRAY['read', 'write', 'admin']::VARCHAR[]",
name="check_scopes",
),
comment="API authentication tokens",
)
op.create_index("ix_api_keys_key_hash", "api_keys", ["key_hash"])
op.create_index("ix_api_keys_key_prefix", "api_keys", ["key_prefix"])
op.create_index("ix_api_keys_project_id", "api_keys", ["project_id"])
op.create_index("ix_api_keys_is_active", "api_keys", ["is_active"])
def downgrade() -> None:
# Drop in reverse dependency order
op.drop_table("api_keys")
op.drop_table("daily_usage")
op.drop_table("webhook_subscriptions")
op.drop_table("task_run_logs")
op.drop_table("task_runs")
op.drop_table("projects")
op.drop_table("claude_accounts")
op.drop_table("framework_presets")
| """Initial v2.0 schema
Revision ID: eaf8e2359e19
Revises:
Create Date: 2026-03-25 00:00:00.000000
Creates all 8 tables for Lazy-Bird v2.0:
1. framework_presets - Framework-specific command presets
2. claude_accounts - Claude API credentials and settings
3. projects - Project configurations and settings
4. task_runs - Task execution records
5. task_run_logs - Detailed task execution logs
6. webhook_subscriptions - Webhook endpoint registrations
7. daily_usage - Daily usage tracking and billing
8. api_keys - API authentication tokens
"""
from typing import Sequence, Union
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
from alembic import op
# revision identifiers, used by Alembic.
revision: str = "eaf8e2359e19"
down_revision: Union[str, None] = None
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
# =========================================================================
# 1. framework_presets (no FKs, referenced by projects)
# =========================================================================
op.create_table(
"framework_presets",
sa.Column(
"id",
postgresql.UUID(as_uuid=True),
server_default=sa.text("gen_random_uuid()"),
nullable=False,
),
sa.Column | [] | yusufkaraaslan/lazy-bird | alembic/versions/20260325_0000-eaf8e2359e19_initial_v2_0_schema.py |
"""Alembic environment configuration for Lazy-Bird v2.0
This module configures Alembic for database migrations.
It loads the database URL from environment variables and
imports all SQLAlchemy models for autogenerate support.
Supports both sync and async engines. For local development,
set DATABASE_URL env var (e.g., sqlite:///./test.db for quick testing).
"""
import os
import sys
from logging.config import fileConfig
from pathlib import Path
from sqlalchemy import engine_from_config, pool
from alembic import context
# Add project root to path so lazy_bird package is importable
sys.path.insert(0, str(Path(__file__).resolve().parents[1]))
# ---------------------------------------------------------------------------
# Import Base and all models
# ---------------------------------------------------------------------------
# We import Base directly from the declarative_base definition.
# We must also import all models so they register with Base.metadata,
# which is required for autogenerate to detect schema changes.
#
# NOTE: lazy_bird.core.database creates engine objects at import time
# using settings.DATABASE_URL. When running Alembic we override the URL
# below via the environment variable DATABASE_URL (or fall back to the
# settings value). The engines created at import time are NOT used by
# Alembic -- Alembic creates its own engine from the [alembic] config.
# ---------------------------------------------------------------------------
# Determine the database URL *before* importing heavy modules so we can
# set it in the environment for Settings to pick up.
_db_url = os.environ.get("DATABASE_URL")
if _db_url:
# Ensure pydantic-settings sees this when it loads
os.environ["DATABASE_URL"] = _db_url
# Now import -- this may trigger Settings() and engine creation, but
# those side-effects won't affect Alembic's own engine.
from lazy_bird.core.database import Base # noqa: E402
from lazy_bird.models import * # noqa: F401, F403, E402
# Also grab settings so we can read DATABASE_URL if env var wasn't set
from lazy_bird.core.config import settings # noqa: E402
# ---------------------------------------------------------------------------
# Alembic Config object
# ---------------------------------------------------------------------------
config = context.config
# Interpret the config file for Python logging.
if config.config_file_name is not None:
fileConfig(config.config_file_name)
# Override sqlalchemy.url with the resolved database URL.
# Priority: DATABASE_URL env var > settings.DATABASE_URL > alembic.ini value
database_url = os.environ.get("DATABASE_URL", settings.DATABASE_URL)
config.set_main_option("sqlalchemy.url", database_url)
# Model MetaData for autogenerate support
target_metadata = Base.metadata
# ---------------------------------------------------------------------------
# Migration runners
# ---------------------------------------------------------------------------
def run_migrations_offline() -> None:
"""Run migrations in 'offline' mode.
This configures the context with just a URL and not an Engine,
so we don't need a DBAPI to be available.
Calls to context.execute() here emit the given string to the
script output.
"""
url = config.get_main_option("sqlalchemy.url")
context.configure(
url=url,
target_metadata=target_metadata,
literal_binds=True,
dialect_opts={"paramstyle": "named"},
compare_type=True,
compare_server_default=True,
include_schemas=False,
)
with context.begin_transaction():
context.run_migrations()
def run_migrations_online() -> None:
"""Run migrations in 'online' mode.
In this scenario we create an Engine and associate a connection
with the context.
"""
connectable = engine_from_config(
config.get_section(config.config_ini_section, {}),
prefix="sqlalchemy.",
poolclass=pool.NullPool,
)
with connectable.connect() as connection:
context.configure(
connection=connection,
target_metadata=target_metadata,
compare_type=True,
compare_server_default=True,
include_schemas=False,
)
with context.begin_transaction():
context.run_migrations()
if context.is_offline_mode():
run_migrations_offline()
else:
run_migrations_online()
| """Alembic environment configuration for Lazy-Bird v2.0
This module configures Alembic for database migrations.
It loads the database URL from environment variables and
imports all SQLAlchemy models for autogenerate support.
Supports both sync and async engines. For local development,
set DATABASE_URL env var (e.g., sqlite:///./test.db for quick testing).
"""
import os
import sys
from logging.config import fileConfig
from pathlib import Path
from sqlalchemy import engine_from_config, pool
from alembic import context
# Add project root to path so lazy_bird package is importable
sys.path.insert(0, str(Path(__file__).resolve().parents[1]))
# ---------------------------------------------------------------------------
# Import Base and all models
# ---------------------------------------------------------------------------
# We import Base directly from the declarative_base definition.
# We must also import all models so they register with Base.metadata,
# which is required for autogenerate to detect schema changes.
#
# NOTE: lazy_bird.core.database creates engine objects at import time
# using settings.DATABASE_URL. When running Alembic we override the URL
# below via the environment variable DATABASE_URL (or fall back to the
# settings value). The engines created at import time are NOT used by
# Alembic -- Alembic creates its own engine from the [alembic] config.
# ---------------------------------------------------------------------------
# Determine the database URL *before* importing heavy modules so we can
# set it in the environment for Settings to pick up.
_db_url = os.environ.get("DATABASE_URL")
if _db_url:
# Ensure pydantic-settings sees this when it loads
os.environ["DATABASE_URL"] = _db_url
# Now import -- this may trigger Settings() and engine creation, but
# those side-effects won't affect Alembic's own engine.
from lazy_bird.core.database import Base # noqa: E402
from lazy_bird.models import * # noqa: F401, F403, E402 | [
"# sqlalchemy/sqlalchemy:lib/sqlalchemy/engine/create.py\nengine_from_config"
] | yusufkaraaslan/lazy-bird | alembic/env.py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.