# Author: Christophe Bourgoin
# Commit: Add development environment setup with uv, pyproject.toml, and Makefile (918983a)
"""Unit tests for custom tools."""
import pytest
from src.tools import (
analyze_content_for_opportunities,
create_engagement_hooks,
extract_key_findings,
format_for_platform,
generate_citations,
generate_seo_keywords,
search_industry_trends,
)
class TestFormatForPlatform:
    """Unit tests covering the format_for_platform tool."""

    @pytest.mark.unit
    def test_format_blog(self):
        """Blog output is markdown-formatted and includes the topic."""
        res = format_for_platform("Test content", "blog", "AI Research")
        assert res["status"] == "success"
        assert res["platform"] == "blog"
        assert "markdown" in res["metadata"]["format"]
        assert "AI Research" in res["formatted_content"]

    @pytest.mark.unit
    def test_format_linkedin(self):
        """LinkedIn output succeeds and contains a takeaways section."""
        res = format_for_platform("Test content", "linkedin", "ML Topic")
        assert res["status"] == "success"
        assert res["platform"] == "linkedin"
        assert "Key Takeaways" in res["formatted_content"]

    @pytest.mark.unit
    def test_format_twitter(self):
        """Twitter output succeeds and is structured as a thread."""
        res = format_for_platform("Test content", "twitter", "AI News")
        assert res["status"] == "success"
        assert res["platform"] == "twitter"
        assert "Thread" in res["formatted_content"]

    @pytest.mark.unit
    def test_invalid_platform(self):
        """An unknown platform name yields an error result."""
        res = format_for_platform("Test content", "invalid", "Topic")
        assert res["status"] == "error"
        assert "Unsupported platform" in res["error_message"]
class TestGenerateCitations:
    """Unit tests covering the generate_citations tool."""

    @pytest.mark.unit
    def test_apa_citations(self):
        """A single source produces one APA-style citation."""
        source = {
            "title": "Test Paper",
            "authors": "Smith, J.",
            "link": "https://arxiv.org/abs/123",
            "year": "2024",
        }
        res = generate_citations([source], "apa")
        assert res["status"] == "success"
        assert len(res["citations"]) == 1
        # APA format: author name followed by the year in parentheses.
        assert "Smith, J." in res["citations"][0]
        assert "(2024)" in res["citations"][0]

    @pytest.mark.unit
    def test_empty_sources(self):
        """An empty source list yields an error result."""
        res = generate_citations([])
        assert res["status"] == "error"
class TestExtractKeyFindings:
    """Unit tests covering the extract_key_findings tool."""

    @pytest.mark.unit
    def test_extract_findings(self):
        """Extraction honors the max_findings cap."""
        sample = "Research found that AI improves efficiency. Studies showed significant results."
        res = extract_key_findings(sample, max_findings=2)
        assert res["status"] == "success"
        assert len(res["findings"]) <= 2

    @pytest.mark.unit
    def test_insufficient_text(self):
        """Text below the minimum length yields an error result."""
        res = extract_key_findings("Too short", max_findings=5)
        assert res["status"] == "error"
class TestGenerateSeoKeywords:
    """Unit tests covering the generate_seo_keywords tool."""

    @pytest.mark.unit
    def test_keyword_generation(self):
        """Keyword generation returns non-empty lists and includes the role."""
        res = generate_seo_keywords("Machine Learning", "AI Consultant")
        assert res["status"] == "success"
        assert len(res["primary_keywords"]) > 0
        assert len(res["technical_keywords"]) > 0
        # The professional role should be surfaced as a primary keyword.
        assert "AI Consultant" in res["primary_keywords"]
class TestCreateEngagementHooks:
    """Unit tests covering the create_engagement_hooks tool."""

    @pytest.mark.unit
    def test_opportunities_goal(self):
        """The opportunities goal yields hooks, CTAs, and echoes the goal."""
        res = create_engagement_hooks("AI Agents", "opportunities")
        assert res["status"] == "success"
        assert len(res["opening_hooks"]) > 0
        assert len(res["closing_ctas"]) > 0
        assert res["goal"] == "opportunities"

    @pytest.mark.unit
    def test_discussion_goal(self):
        """The discussion goal yields at least one discussion question."""
        res = create_engagement_hooks("NLP", "discussion")
        assert res["status"] == "success"
        assert len(res["discussion_questions"]) > 0
class TestAnalyzeContentForOpportunities:
    """Unit tests covering the analyze_content_for_opportunities tool."""

    @pytest.mark.unit
    def test_content_analysis(self):
        """Analysis returns all score fields with a bounded opportunity score."""
        sample = """
As an AI Consultant specializing in Machine Learning, I've built production systems
using PyTorch and TensorFlow. Let's connect to discuss how AI can solve your business problems.
Check out my GitHub for real-world implementations.
"""
        res = analyze_content_for_opportunities(sample, "AI Consultant")
        assert res["status"] == "success"
        assert "opportunity_score" in res
        assert "seo_score" in res
        assert "engagement_score" in res
        # Opportunity score is expressed on a 0-100 scale.
        assert 0 <= res["opportunity_score"] <= 100

    @pytest.mark.unit
    def test_short_content_error(self):
        """Content below the minimum length yields an error result."""
        res = analyze_content_for_opportunities("Too short")
        assert res["status"] == "error"
class TestSearchIndustryTrends:
    """Integration tests covering the search_industry_trends tool."""

    @pytest.mark.integration
    @pytest.mark.slow
    def test_trend_search(self):
        """Live trend search returns trends and hot skills (requires internet)."""
        res = search_industry_trends("Machine Learning", "global", max_results=3)
        assert res["status"] == "success"
        assert "trends" in res
        assert "hot_skills" in res
        assert len(res["hot_skills"]) > 0