import hashlib
import json
import os
import shutil
from pathlib import Path
from typing import Dict, List, Optional

from lxml import etree
import requests


class DeepSeekCodeAnalyzer:
    """Analyze code snippets embedded in an HTML file via the DeepSeek chat API.

    Workflow: extract every ``<pre>`` block from the HTML, send each snippet to
    the API for review, cache per-snippet results on disk (keyed by an MD5 of
    the snippet text), and aggregate everything into a single report dict.
    """

    def __init__(self):
        # Prefer an environment variable for the credential; fall back to the
        # historical embedded key so existing setups keep working.
        # SECURITY: the hard-coded fallback key is committed to source control
        # and should be revoked and removed.
        self.api_key = os.environ.get(
            'DEEPSEEK_API_KEY', 'sk-b6eb6b5c93884e5aa2676e1e0d60d620')
        self.api_url = "https://api.deepseek.com/v1/chat/completions"
        self.cache_dir = Path('.code_cache')
        # Wipe any cache left over from a previous run, then recreate the
        # directory.  NOTE(review): clearing on startup means the cache only
        # helps within a single process run -- confirm this is intentional.
        shutil.rmtree(self.cache_dir, ignore_errors=True)
        self.cache_dir.mkdir(exist_ok=True)

    def analyze_html_file(self, html_file: str) -> Dict:
        """Analyze every code snippet found in *html_file*.

        Returns the aggregated report dict built by :meth:`_generate_report`.
        """
        with open(html_file, 'r', encoding='utf-8') as f:
            html = f.read()

        code_blocks = self._extract_code_blocks(html)
        results = []
        for idx, code in enumerate(code_blocks, 1):
            cache_key = self._generate_cache_key(code)
            cached_result = self._load_from_cache(cache_key)

            # Compare against None so an empty-but-valid cached dict still
            # counts as a cache hit.
            if cached_result is not None:
                results.append(cached_result)
            else:
                result = self._analyze_code_with_deepseek(code, idx, html)
                self._save_to_cache(cache_key, result)
                results.append(result)

        # BUG FIX: the report was computed but the return was commented out,
        # so this method always returned None despite its -> Dict annotation.
        return self._generate_report(results)

    def input_html(self, html: str) -> None:
        """Send the raw HTML to the DeepSeek API as context for later analysis."""
        headers = {"Authorization": f"Bearer {self.api_key}"}
        first_message = {
            "role": "user",
            "content": f"以下是我需要后续分析的HTML文件，请记住它，但暂时不需要分析：\n{html}"
        }

        data = {
            "model": "deepseek-chat",
            "messages": [first_message]
        }
        # Timeout prevents hanging forever on a stalled connection; check the
        # HTTP status before trusting the response body.
        response = requests.post(self.api_url, headers=headers, json=data,
                                 timeout=60)
        response.raise_for_status()
        print(response.json()["choices"][0]["message"]["content"])

    def _extract_code_blocks(self, html: str) -> List[str]:
        """Extract the full text of every <pre> element using lxml.

        Uses ``itertext()`` so text nested inside child elements (the common
        ``<pre><code>...</code></pre>`` layout) is captured; the bare ``.text``
        attribute would return None for such elements, which later crashed
        ``_generate_cache_key`` on ``None.encode()``.
        """
        tree = etree.HTML(html)
        if tree is None:  # etree.HTML returns None for empty/blank input
            return []
        return [''.join(pre.itertext()) for pre in tree.xpath('//pre')]

    def _analyze_code_with_deepseek(self, code: str, position: int, html: str) -> Dict:
        """Ask the DeepSeek API to review one code snippet.

        Returns the model's answer parsed as a dict when possible; otherwise a
        dict wrapping the raw response text.  On any failure returns an error
        dict carrying the original snippet, so callers always receive a Dict.
        """
        headers = {
            "Authorization": f"Bearer {self.api_key}",
            "Content-Type": "application/json"
        }

        # NOTE: the JSON template below previously used full-width quotes
        # around use_html and was missing commas, so the model was asked to
        # produce malformed JSON.
        prompt = f"""作为ArkTS语言专家和应用开发专家，请检查文中的代码片段是否有问题，包括语法问题和代码逻辑问题，结果用中文回答：
                {code}，不完整的代码需要根据{html}源代码中上下文进行相应的判断。
                要求：
                1. 语法错误
                2. 永远为假的循环条件
                3. 未使用的变量
                4. 无法到达的代码
                5. 所有的代码都在html中，有些代码不完整需要结合html中的上下文进行判断

                返回JSON格式：
                {{
                    "position": "片段{position}",
                    "code": {code},
                    "result": ["如果存在问题则输出Failed，如果没问题输出Pass"],
                    "is_complete": 布尔值,
                    "syntax_errors": ["问题描述"],
                    "missing_definitions": ["缺失的类/变量"],
                    "suggested_fixes": ["修复建议"],
                    "use_html": ["告诉我你是否用到前面给你的html文件进行判断"]
                }}"""

        payload = {
            "model": "deepseek-chat",
            "messages": [{"role": "user", "content": prompt}],
            "temperature": 0.2  # low temperature for more deterministic output
        }

        try:
            # BUG FIX: the status is now checked *before* indexing into the
            # response body (previously the body was printed and indexed
            # first, masking HTTP errors with KeyError).
            response = requests.post(self.api_url, headers=headers,
                                     json=payload, timeout=60)
            response.raise_for_status()
            content = response.json()["choices"][0]["message"]["content"]
            print(content)
            try:
                # The report builders (_generate_report/_create_summary) need
                # dicts; previously the raw string was returned, which made
                # them fail with AttributeError on .get().
                parsed = json.loads(content)
                if isinstance(parsed, dict):
                    return parsed
            except json.JSONDecodeError:
                pass
            # Model did not return strict JSON; keep the raw text for review.
            return {
                "position": f"片段{position}",
                "raw_response": content,
                "original_code": code
            }
        except Exception as e:
            return {
                "position": f"片段{position}",
                "error": f"API调用失败: {str(e)}",
                "original_code": code
            }

    def _save_to_cache(self, key: str, data: Dict):
        """Persist one snippet's analysis result as pretty-printed JSON."""
        cache_file = self.cache_dir / f'{key}.json'
        with open(cache_file, 'w', encoding='utf-8') as f:
            json.dump(data, f, ensure_ascii=False, indent=2)

    def _generate_report(self, results: List[Dict]) -> Dict:
        """Aggregate per-snippet results into stats, details, and a summary."""
        stats = {
            "total_snippets": len(results),
            "error_snippets": sum(1 for r in results if r.get("syntax_errors")),
            "missing_definitions": sum(len(r.get("missing_definitions", [])) for r in results)
        }

        return {
            "metadata": stats,
            "details": results,
            "summary": self._create_summary(results)
        }

    def _create_summary(self, results: List[Dict]) -> str:
        """Render a Markdown summary of all snippet results."""
        summary = ["# 代码分析报告", f"共分析 {len(results)} 个代码片段\n"]

        for res in results:
            summary.append(f"\n## {res['position']}")
            summary.append(f"```typescript\n{res.get('original_code', '')}\n```")

            if errors := res.get("syntax_errors"):
                summary.append("❌ **语法错误:**")
                summary.extend(f"- {e}" for e in errors)

            if missing := res.get("missing_definitions"):
                summary.append("🔍 **缺失定义:**")
                summary.extend(f"- {m}" for m in missing)

            if fixes := res.get("suggested_fixes"):
                summary.append("💡 **修复建议:**")
                summary.extend(f"- {f}" for f in fixes)

        return "\n".join(summary)

    def _generate_cache_key(self, code: str) -> str:
        """Derive a cache filename from the snippet text (MD5, non-security use)."""
        return hashlib.md5(code.encode()).hexdigest()

    def _load_from_cache(self, key: str) -> Optional[Dict]:
        """Return the cached result for *key*, or None when there is no cache file."""
        cache_file = self.cache_dir / f'{key}.json'
        if cache_file.exists():
            with open(cache_file, 'r', encoding='utf-8') as f:
                return json.load(f)
        return None


if __name__ == '__main__':
    # Entry point: analyze the sample HTML file shipped next to this script.
    analyzer = DeepSeekCodeAnalyzer()
    analyzer.analyze_html_file('test.html')

