"""LLM-based code analyzer for pull request review."""

import json
import logging
import re
from typing import List, Optional

from openai import OpenAI

from ..config import LLMConfig
from ..models import Issue, ReviewIssue, ReviewResult

logger = logging.getLogger(__name__)


class LLMAnalyzer:
    """LLM-based code analyzer."""

    def __init__(self, config: LLMConfig):
        """Create an analyzer backed by an OpenAI-compatible chat endpoint.

        Args:
            config: LLM configuration carrying the API key, base URL,
                model name and sampling parameters.
        """
        self.config = config
        # base_url lets this point at any OpenAI-compatible service.
        self.client = OpenAI(base_url=config.base_url, api_key=config.api_key)

    def review_code(
        self,
        repo_name: str,
        pr_number: int,
        pr_title: str,
        pr_body: str,
        pr_url: str,
        code_diff: str,
        issues: Optional[List[Issue]] = None,
    ) -> ReviewResult:
        """
        Review code changes using LLM.

        Args:
            repo_name: Repository name
            pr_number: Pull request number
            pr_title: Pull request title
            pr_body: Pull request description
            pr_url: Pull request URL
            code_diff: Code diff string
            issues: Associated issues (optional)

        Returns:
            ReviewResult object.  On any LLM or parsing failure a fallback
            result (severity "medium", error message as summary) is returned
            instead of raising.
        """
        # Lazy %-style args: the message is only formatted if the level is enabled.
        logger.info("Starting LLM review for PR %s#%s", repo_name, pr_number)

        # Build prompt
        prompt = self._build_review_prompt(pr_title, pr_body, code_diff, issues)

        try:
            # Call LLM
            response = self.client.chat.completions.create(
                model=self.config.model,
                messages=[
                    {
                        "role": "system",
                        "content": "你是一位资深的代码审查专家，具有丰富的安全、代码质量和软件工程经验。",
                    },
                    {"role": "user", "content": prompt},
                ],
                temperature=self.config.temperature,
                max_tokens=self.config.max_tokens,
            )

            # The API may return None for content; normalize so the regex
            # parsing downstream always receives a string.
            content = response.choices[0].message.content or ""
            logger.info("Received LLM response for PR %s#%s", repo_name, pr_number)

            # Print LLM response to console for debugging
            print("\n" + "=" * 80)
            print(f"LLM Response for PR {repo_name}#{pr_number}: {pr_title}")
            print("=" * 80)
            print(content)
            print("=" * 80 + "\n")

            # Parse response
            return self._parse_review_response(
                repo_name, pr_number, pr_title, pr_url, content, issues
            )

        except Exception as e:
            # Broad catch is deliberate: one failed review must not abort a
            # batch run.  logger.exception records the full traceback.
            logger.exception("LLM review failed for PR %s#%s", repo_name, pr_number)
            # Return a default result on error
            return ReviewResult(
                repo_name=repo_name,
                pr_number=pr_number,
                pr_title=pr_title,
                pr_url=pr_url,
                summary=f"代码审查过程中发生错误: {str(e)}",
                severity="medium",
                severity_score=0.5,
            )

    def _build_review_prompt(
        self,
        pr_title: str,
        pr_body: str,
        code_diff: str,
        issues: Optional[List[Issue]] = None,
    ) -> str:
        """
        Build review prompt for LLM.

        Args:
            pr_title: PR title
            pr_body: PR body (may be empty/None; a placeholder is used)
            code_diff: Code diff (truncated to 8000 chars to bound tokens)
            issues: Associated issues (bodies truncated to 1000 chars each)

        Returns:
            Prompt string
        """
        prompt_parts = [
            "你是一位资深的代码审查专家。请对以下 Pull Request 进行全面的代码审查。",
            "",
            "## PR 信息",
            f"- PR 标题: {pr_title}",
            f"- PR 描述: {pr_body[:500] if pr_body else '无描述'}",
            "",
        ]

        # Add issue information if available
        if issues:
            prompt_parts.extend([
                "## 关联的 Issue 需求",
            ])
            for issue in issues:
                prompt_parts.extend([
                    f"### Issue #{issue.number}: {issue.title}",
                    # Guard: issue.body can be None (empty issue description);
                    # slicing None would raise TypeError.
                    f"{(issue.body or '')[:1000]}...",  # Limit issue body length
                    "",
                ])
            prompt_parts.append(
                "请特别关注代码实现是否满足上述 Issue 中的需求描述。"
            )
            prompt_parts.append("")

        # Add code diff
        prompt_parts.extend([
            "## 代码变更",
            "```diff",
            code_diff[:8000],  # Limit diff length to avoid token limits
            "```",
            "",
            "## 审查重点",
            "请从以下维度进行审查：",
            "1. **安全性**: 是否存在安全漏洞、注入风险、权限问题等",
            "2. **代码质量**: 是否符合 Clean Code 原则，代码可读性、可维护性",
            "3. **业务完备性**: 是否完整实现了业务需求，是否有遗漏",
            "4. **异常处理**: 是否考虑了边界情况、异常场景、错误处理",
            "5. **性能**: 是否存在性能问题、资源泄漏等",
            "6. **测试**: 是否有足够的测试覆盖",
            "",
            "## 输出要求",
            "请按照以下 JSON 格式输出审查结果：",
            "```json",
            "{",
            '  "summary": "总体评价，1-2句话",',
            '  "severity_score": 0.0-1.0之间的浮点数,',
            '  "critical_issues": [',
            '    {',
            '      "description": "问题描述",',
            '      "location": "文件路径:行号",',
            '      "suggestion": "建议修复方案的文字说明",',
            '      "before_code": "修改前的代码片段（如果有问题代码）",',
            '      "after_code": "修改后的代码片段（必须提供实际代码或伪代码）"',
            '    }',
            "  ],",
            '  "high_issues": [...],',
            '  "medium_issues": [...],',
            '  "low_issues": [...],',
            '  "positives": ["值得肯定的地方1", "值得肯定的地方2"],',
            '  "suggestions": ["改进建议1", "改进建议2"]',
            "}",
            "```",
            "",
            "**重要提示**:",
            "- 对于每个问题，如果涉及代码修改，请务必提供 `before_code` 和 `after_code` 字段",
            "- `before_code` 应包含有问题的原始代码片段（3-15行，包含足够的上下文）",
            "- `after_code` **必须提供具体的修复代码**，可以是：",
            "  1. **实际可执行的代码**：提供完整的修复后的代码片段",
            "  2. **伪代码**：如果无法提供完整代码，使用伪代码展示修复思路和关键修改点",
            "  3. **代码示例**：提供类似的代码示例说明如何修复",
            "- `after_code` 不应只是文字描述，必须包含代码或伪代码",
            "- 代码片段应保持原有的缩进和格式",
            "- 如果问题不涉及具体代码修改（如文档、测试等），可以将 `before_code` 和 `after_code` 设为空字符串",
            "- 伪代码格式示例：",
            "  ```",
            "  // 伪代码示例",
            "  if (condition) {",
            "      // 修复逻辑",
            "      fixed_code_here",
            "  }",
            "  ```",
        ])

        return "\n".join(prompt_parts)

    def _parse_review_response(
        self,
        repo_name: str,
        pr_number: int,
        pr_title: str,
        pr_url: str,
        content: str,
        issues: Optional[List[Issue]] = None,
    ) -> ReviewResult:
        """
        Parse LLM response into ReviewResult.

        Tries to extract a JSON object (fenced ```json block first, then any
        bare {...} span); falls back to plain-text parsing when no valid JSON
        is found.

        Args:
            repo_name: Repository name
            pr_number: PR number
            pr_title: PR title
            pr_url: PR URL
            content: LLM response content
            issues: Associated issues

        Returns:
            ReviewResult object
        """
        # Try to extract JSON from response
        json_match = re.search(r"```json\s*(\{.*?\})\s*```", content, re.DOTALL)
        if not json_match:
            # Try to find JSON without code block.
            # BUGFIX: the pattern needs a capture group, otherwise the
            # .group(1) below raises IndexError on this fallback path.
            json_match = re.search(r"(\{.*\})", content, re.DOTALL)

        if json_match:
            try:
                data = json.loads(json_match.group(1))
                return self._build_review_result_from_json(
                    repo_name, pr_number, pr_title, pr_url, data, issues
                )
            except json.JSONDecodeError as e:
                logger.warning(f"Failed to parse JSON from LLM response: {e}")

        # Fallback: parse text response
        return self._parse_text_response(
            repo_name, pr_number, pr_title, pr_url, content, issues
        )

    def _build_review_result_from_json(
        self,
        repo_name: str,
        pr_number: int,
        pr_title: str,
        pr_url: str,
        data: dict,
        issues: Optional[List[Issue]] = None,
    ) -> ReviewResult:
        """Build ReviewResult from parsed JSON data.

        The JSON comes from an LLM and is treated as untrusted: a missing or
        non-numeric severity_score falls back to 0.5, and the score is
        clamped into [0, 1] before deriving the severity level.
        """
        try:
            severity_score = float(data.get("severity_score", 0.5))
        except (TypeError, ValueError):
            # e.g. the model emitted null or prose instead of a number
            severity_score = 0.5
        severity_score = min(1.0, max(0.0, severity_score))
        severity = self._assess_severity(severity_score)

        def _issues_for(key: str, level: str) -> List[ReviewIssue]:
            """Convert one severity bucket of raw dicts into ReviewIssue objects."""
            return [
                ReviewIssue(
                    description=item.get("description", ""),
                    location=item.get("location", ""),
                    suggestion=item.get("suggestion", ""),
                    severity=level,
                    before_code=item.get("before_code", ""),
                    after_code=item.get("after_code", ""),
                )
                for item in data.get(key, [])
            ]

        # Generate issue comparison if issues exist
        issue_comparison = ""
        if issues:
            issue_comparison = self._compare_with_issues(issues, data.get("summary", ""))

        return ReviewResult(
            repo_name=repo_name,
            pr_number=pr_number,
            pr_title=pr_title,
            pr_url=pr_url,
            summary=data.get("summary", ""),
            severity=severity,
            severity_score=severity_score,
            critical_issues=_issues_for("critical_issues", "critical"),
            high_issues=_issues_for("high_issues", "high"),
            medium_issues=_issues_for("medium_issues", "medium"),
            low_issues=_issues_for("low_issues", "low"),
            positives=data.get("positives", []),
            suggestions=data.get("suggestions", []),
            issue_comparison=issue_comparison,
        )

    def _parse_text_response(
        self,
        repo_name: str,
        pr_number: int,
        pr_title: str,
        pr_url: str,
        content: str,
        issues: Optional[List[Issue]] = None,
    ) -> ReviewResult:
        """Fallback used when the LLM response contains no parseable JSON.

        The first 500 characters of the raw response become the summary and
        severity defaults to "medium" with a score of 0.5.  The ``issues``
        argument is accepted for signature parity but not used here.
        """
        if content:
            summary = content[:500]
        else:
            summary = "无法解析审查结果"

        return ReviewResult(
            repo_name=repo_name,
            pr_number=pr_number,
            pr_title=pr_title,
            pr_url=pr_url,
            summary=summary,
            severity="medium",
            severity_score=0.5,
        )

    def _assess_severity(self, score: float) -> str:
        """
        Assess severity level from score.

        Args:
            score: Severity score (0-1)

        Returns:
            Severity level string
        """
        if score >= 0.8:
            return "critical"
        elif score >= 0.6:
            return "high"
        elif score >= 0.4:
            return "medium"
        else:
            return "low"

    def _compare_with_issues(self, issues: List[Issue], review_summary: str) -> str:
        """
        Compare code implementation with issue requirements.

        Args:
            issues: List of associated issues
            review_summary: Review summary from LLM (currently unused; kept
                for interface stability)

        Returns:
            Comparison text, or "" when there are no issues
        """
        if not issues:
            return ""

        listing = "\n".join(
            f"Issue #{issue.number}: {issue.title}" for issue in issues
        )
        return f"关联的 Issue: {listing}\n\n审查总结中已考虑需求实现情况。"

    def generate_markdown_report(self, result: ReviewResult) -> str:
        """
        Generate Markdown format report from review result.

        Sections: PR info, summary, severity, issues grouped by severity
        (with before/after code comparison for critical/high/medium),
        positives, suggestions and the optional issue comparison.

        Args:
            result: ReviewResult object

        Returns:
            Markdown formatted report
        """
        lines = [
            "# 代码审查报告",
            "",
            "## PR 信息",
            f"- **仓库**: {result.repo_name}",
            f"- **PR 编号**: #{result.pr_number}",
            f"- **标题**: {result.pr_title}",
            f"- **链接**: {result.pr_url}",
            f"- **审查时间**: {result.generated_at.strftime('%Y-%m-%d %H:%M:%S')}",
            "",
            "## 审查总结",
            result.summary,
            "",
            "## 严重程度评估",
            f"- **级别**: {result.severity.upper()}",
            f"- **评分**: {result.severity_score:.2f}/1.0",
            "",
        ]

        # Helper function to detect language from file path
        def detect_language(file_path: str) -> str:
            """Detect the fenced-code-block language tag from a file path.

            Accepts locations of the form "path/to/file.ext:line"; returns ""
            when no mapping is known.
            """
            if not file_path:
                return ""

            # Locations look like "file.py:123" - drop the line number part.
            if ":" in file_path:
                file_path = file_path.split(":")[0]

            file_path = file_path.strip()
            if "." not in file_path:
                return ""

            ext = file_path.split(".")[-1].lower()

            # Extension -> Markdown code-fence language tag
            lang_map = {
                "lua": "lua",
                "py": "python",
                "js": "javascript",
                "ts": "typescript",
                "jsx": "jsx",
                "tsx": "tsx",
                "java": "java",
                "cpp": "cpp",
                "cc": "cpp",
                "cxx": "cpp",
                "c": "c",
                "h": "c",
                "hpp": "cpp",
                "go": "go",
                "rs": "rust",
                "rb": "ruby",
                "php": "php",
                "sh": "bash",
                "bash": "bash",
                "yaml": "yaml",
                "yml": "yaml",
                "json": "json",
                "xml": "xml",
                "html": "html",
                "css": "css",
                "sql": "sql",
                "swift": "swift",
                "kt": "kotlin",
                "scala": "scala",
                "r": "r",
                "m": "objectivec",
                "mm": "objectivec",
            }

            return lang_map.get(ext, "")

        # Helper function to format issue with code comparison
        def format_issue(issue: ReviewIssue, include_code: bool = True) -> List[str]:
            """Format a single issue with optional before/after code comparison."""
            issue_lines = [
                f"- **{issue.location}**",
                f"  - 问题: {issue.description}",
                f"  - 建议: {issue.suggestion}",
            ]

            if include_code and (issue.before_code or issue.after_code):
                # Language tag for the fenced blocks ("" when unknown).
                lang = detect_language(issue.location)

                def append_code_block(label: str, code: str) -> None:
                    """Append one indented, fenced code block under *label*."""
                    issue_lines.append(f"  **{label}:**")
                    issue_lines.append(f"  ```{lang}")
                    # Preserve the snippet's own formatting; indent each line
                    # two spaces so the block nests under the list item.
                    for code_line in code.strip().split("\n"):
                        issue_lines.append(f"  {code_line}")
                    issue_lines.append("  ```")
                    issue_lines.append("")

                issue_lines.append("")
                issue_lines.append("  **代码对比:**")
                issue_lines.append("")
                if issue.before_code:
                    append_code_block("修改前", issue.before_code)
                if issue.after_code:
                    append_code_block("修改后", issue.after_code)

            issue_lines.append("")
            return issue_lines

        # Issue sections grouped by severity; low issues omit code snippets
        # to keep the report short.
        severity_sections = (
            ("### 🔴 Critical (严重)", result.critical_issues, True),
            ("### 🟠 High (高)", result.high_issues, True),
            ("### 🟡 Medium (中)", result.medium_issues, True),
            ("### 🟢 Low (低)", result.low_issues, False),
        )
        for heading, section_issues, include_code in severity_sections:
            if section_issues:
                lines.extend([heading, ""])
                for issue in section_issues:
                    lines.extend(format_issue(issue, include_code=include_code))

        # Positives and suggestions are simple bullet lists.
        for heading, entries in (
            ("## 优点", result.positives),
            ("## 建议", result.suggestions),
        ):
            if entries:
                lines.extend([heading, ""])
                for entry in entries:
                    lines.append(f"- {entry}")
                lines.append("")

        # Add issue comparison
        if result.issue_comparison:
            lines.extend([
                "## Issue 需求对比",
                "",
                result.issue_comparison,
                "",
            ])

        lines.append("---")
        lines.append("*本报告由自动代码审查工具生成*")

        return "\n".join(lines)

