#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Unit tests for the Report Generator Service
"""
import unittest
from unittest.mock import patch, MagicMock, mock_open
import os
import sys
import json
from pathlib import Path

# Add the project root directory to the Python path
project_root = Path(__file__).resolve().parent.parent
sys.path.insert(0, str(project_root))

from src.services.report_generator import ReportGenerator


class TestReportGenerator(unittest.TestCase):
    """Test cases for the ReportGenerator class"""

    def setUp(self):
        """Set up test fixtures"""
        # Create a temporary output directory for testing
        self.test_output_dir = "test_reports"
        self.generator = ReportGenerator(output_dir=self.test_output_dir)

        # Sample test data
        self.repo_name = "test-repo"
        self.test_results = {
            "repository_info": {
                "branch": "main",
                "commit": "abc123",
                "remote_url": "https://github.com/test/test-repo",
                "size": 1024,
                "file_count": 10
            },
            "language_breakdown": {
                "Python": {"files": 5, "lines": 500, "percentage": 70},
                "JavaScript": {"files": 3, "lines": 200, "percentage": 30}
            },
            "code_analysis": {
                "Python": {
                    "issue_count": 3,
                    "issues": [
                        {"severity": "high", "issue": "Unused variable", "file": "test.py", "line": 10, "description": "Variable 'x' is not used"},
                        {"severity": "medium", "issue": "Missing docstring", "file": "test.py", "line": 5, "description": "Function missing docstring"}
                    ]
                },
                "JavaScript": {
                    "issue_count": 2,
                    "issues": [
                        {"severity": "medium", "issue": "Unused variable", "file": "test.js", "line": 15, "description": "Variable 'y' is not used"}
                    ]
                }
            },
            "security_scan": {
                "Python": {
                    "vulnerability_count": 1,
                    "vulnerabilities": [
                        {"severity": "critical", "issue": "SQL Injection", "file": "db.py", "line": 25, "description": "Unsanitized SQL query"}
                    ]
                },
                "JavaScript": {
                    "vulnerability_count": 0,
                    "vulnerabilities": []
                }
            },
            "performance_analysis": {
                "language_results": {
                    "Python": {
                        "issue_count": 2,
                        "issues": [
                            {"issue": "Inefficient loop", "file": "test.py", "line": 20, "description": "Use list comprehension instead"}
                        ]
                    }
                },
                "hotspots": [
                    {"file": "test.py", "language": "Python", "issue_count": 2}
                ]
            },
            "ai_review": {
                "reviews": {
                    "test.py": {
                        "status": "success",
                        "review_text": "Code review for test.py",
                        "suggestions": [
                            {"section": "Code Quality", "line": 10, "description": "Variable 'x' is not used", "details": "Remove unused variable"}
                        ]
                    }
                },
                "summary": "Overall, the code quality is good but there are some issues to address."
            }
        }
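
    # NOTE: the *_count fields above do not always match the number of issues
    # actually listed; the summary-metric assertions below rely on the counts,
    # while the top-issue assertions walk the issue lists themselves.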

    def tearDown(self):
        """Tear down test fixtures"""
        # Clean up the test output directory
        if os.path.exists(self.test_output_dir):
            for file in os.listdir(self.test_output_dir):
                os.remove(os.path.join(self.test_output_dir, file))
            os.rmdir(self.test_output_dir)

    def test_init(self):
        """Test initialization of the generator"""
        self.assertIsNotNone(self.generator)
        self.assertEqual(self.generator.output_dir, self.test_output_dir)
        self.assertTrue(os.path.exists(self.test_output_dir))
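
    # The tests below stack unittest.mock.patch decorators. Patch decorators
    # are applied bottom-up, so the bottom-most patch supplies the first mock
    # argument after self.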

    # Patch targets are assumptions: this presumes _generate_json_report
    # writes via the built-in open() and serializes with json.dump().
    @patch("builtins.open", new_callable=mock_open)
    @patch("json.dump")
    def test_generate_json_report(self, mock_json_dump, mock_file_open):
        """Test _generate_json_report method"""
        # Call the method
        report_content = {"test": "content"}
        report_path = self.generator._generate_json_report("test_report", report_content)

        # Verify the result
        expected_path = os.path.join(self.test_output_dir, "test_report.json")
        self.assertEqual(report_path, expected_path)
        mock_file_open.assert_called_once_with(expected_path, "w", encoding="utf-8")
        mock_json_dump.assert_called_once()

    # Patch targets are assumptions: this presumes the module converts to HTML
    # via markdown.markdown(), i.e. it does `import markdown`.
    @patch("builtins.open", new_callable=mock_open)
    @patch("markdown.markdown")
    def test_generate_html_report(self, mock_markdown, mock_file_open):
        """Test _generate_html_report method"""
        # Mock markdown conversion
        mock_markdown.return_value = "<h1>Test</h1>"

        # Call the method
        report_content = {"metadata": {"repository_name": "test-repo"}}
        report_path = self.generator._generate_html_report("test_report", report_content)

        # Verify the result
        expected_path = os.path.join(self.test_output_dir, "test_report.html")
        self.assertEqual(report_path, expected_path)
        mock_file_open.assert_called_once_with(expected_path, "w", encoding="utf-8")
        mock_markdown.assert_called_once()
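
    # pdfkit is a thin wrapper around the wkhtmltopdf binary, so the
    # conversion call is mocked to keep the test hermetic.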
    # Patch targets are assumptions: pdfkit.from_file() for the conversion
    # and os.remove() for deleting the temporary HTML file.
    @patch("pdfkit.from_file")
    @patch("os.remove")
    def test_generate_pdf_report(self, mock_remove, mock_pdfkit):
        """Test _generate_pdf_report method"""
        # Mock the HTML report generation
        with patch.object(self.generator, '_generate_html_report') as mock_html_report:
            mock_html_report.return_value = os.path.join(self.test_output_dir, "test_report_temp.html")

            # Call the method
            report_content = {"test": "content"}
            report_path = self.generator._generate_pdf_report("test_report", report_content)

            # Verify the result
            expected_path = os.path.join(self.test_output_dir, "test_report.pdf")
            self.assertEqual(report_path, expected_path)
            mock_html_report.assert_called_once_with("test_report_temp", report_content)
            mock_pdfkit.assert_called_once_with(
                os.path.join(self.test_output_dir, "test_report_temp.html"),
                expected_path
            )
            mock_remove.assert_called_once_with(os.path.join(self.test_output_dir, "test_report_temp.html"))

    # Patch targets are assumptions: the writeheader()/writerows() assertions
    # suggest csv.DictWriter, written through the built-in open().
    @patch("builtins.open", new_callable=mock_open)
    @patch("csv.DictWriter")
    def test_generate_csv_report(self, mock_csv_writer, mock_file_open):
        """Test _generate_csv_report method"""
        # Mock CSV writer
        mock_writer = MagicMock()
        mock_csv_writer.return_value = mock_writer

        # Call the method
        report_content = {
            "code_quality": {"issues_by_language": {}},
            "security": {"vulnerabilities_by_language": {}},
            "performance": {"issues_by_language": {}},
            "ai_review": {"file_reviews": {}}
        }
        report_path = self.generator._generate_csv_report("test_report", report_content)

        # Verify the result
        expected_path = os.path.join(self.test_output_dir, "test_report.csv")
        self.assertEqual(report_path, expected_path)
        mock_file_open.assert_called_once_with(expected_path, "w", newline="", encoding="utf-8")
        mock_writer.writeheader.assert_called_once()
        mock_writer.writerows.assert_called_once()

    def test_calculate_summary_metrics(self):
        """Test _calculate_summary_metrics method"""
        # Call the method
        metrics = self.generator._calculate_summary_metrics(self.test_results)

        # Verify the result
        self.assertEqual(metrics["total_files"], 10)
        self.assertEqual(metrics["repository_size"], 1024)
        self.assertEqual(metrics["total_code_issues"], 5)  # 3 Python + 2 JavaScript
        self.assertEqual(metrics["critical_code_issues"], 1)  # 1 high severity issue
        self.assertEqual(metrics["total_vulnerabilities"], 1)  # 1 Python vulnerability
        self.assertEqual(metrics["critical_vulnerabilities"], 1)  # 1 critical vulnerability
        self.assertEqual(metrics["total_performance_issues"], 2)  # 2 Python performance issues
        self.assertEqual(metrics["performance_hotspots"], 1)  # 1 hotspot
        self.assertIn("overall_score", metrics)
        self.assertIn("quality_rating", metrics)

    def test_extract_top_issues(self):
        """Test _extract_top_issues method"""
        # Call the method
        top_issues = self.generator._extract_top_issues(self.test_results["code_analysis"])

        # Verify the result
        self.assertEqual(len(top_issues), 3)  # 3 issues listed in the fixture (2 Python + 1 JavaScript)
        self.assertEqual(top_issues[0]["severity"], "high")  # First issue should be high severity

    def test_extract_critical_vulnerabilities(self):
        """Test _extract_critical_vulnerabilities method"""
        # Call the method
        critical_vulns = self.generator._extract_critical_vulnerabilities(self.test_results["security_scan"])

        # Verify the result
        self.assertEqual(len(critical_vulns), 1)  # Only one vulnerability in the test data
        self.assertEqual(critical_vulns[0]["severity"], "critical")

    def test_generate_recommendations(self):
        """Test _generate_recommendations method"""
        # Call the method
        recommendations = self.generator._generate_recommendations(self.test_results)

        # Verify the result
        self.assertIn("high_priority", recommendations)
        self.assertIn("medium_priority", recommendations)
        self.assertIn("low_priority", recommendations)
        self.assertEqual(len(recommendations["high_priority"]), 1)  # One critical security vulnerability
        self.assertGreaterEqual(len(recommendations["medium_priority"]), 1)  # At least one high code issue

    # os.path.exists and os.listdir are patched defensively so the test never
    # touches the real filesystem; the mocks themselves are not asserted on.
    @patch("os.path.exists")
    @patch("os.listdir")
    def test_generate_report(self, mock_listdir, mock_exists):
        """Test generate_report method"""
        # Mock the report generation methods
        with patch.object(self.generator, '_create_report_content') as mock_create_content, \
             patch.object(self.generator, '_generate_json_report') as mock_json_report, \
             patch.object(self.generator, '_generate_html_report') as mock_html_report, \
             patch.object(self.generator, '_generate_pdf_report') as mock_pdf_report, \
             patch.object(self.generator, '_generate_csv_report') as mock_csv_report:
            # Set up the mocks
            mock_create_content.return_value = {"test": "content"}
            mock_json_report.return_value = "json_path"
            mock_html_report.return_value = "html_path"
            mock_pdf_report.return_value = "pdf_path"
            mock_csv_report.return_value = "csv_path"

            # Call the method with all formats
            report_paths = self.generator.generate_report(self.repo_name, self.test_results, "all")

            # Verify the result
            self.assertEqual(report_paths["json"], "json_path")
            self.assertEqual(report_paths["html"], "html_path")
            self.assertEqual(report_paths["pdf"], "pdf_path")
            self.assertEqual(report_paths["csv"], "csv_path")
            mock_create_content.assert_called_once_with(self.repo_name, self.test_results)

            # Call the method with specific format
            report_paths = self.generator.generate_report(self.repo_name, self.test_results, "json")

            # Verify the result
            self.assertEqual(len(report_paths), 1)
            self.assertEqual(report_paths["json"], "json_path")
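

# Run with: python -m unittest tests.test_report_generator
# (assumes this file lives in a tests/ package one level below the project
# root, alongside src/, per the sys.path setup above)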
if __name__ == "__main__":
    unittest.main()