# src/adapters/github_adapter.py
import re
import requests
from bs4 import BeautifulSoup
from typing import Dict, Optional
from .base_adapter import BaseAdapter
from src.utils.anti_spider import AntiSpiderMiddleware

class GitHubAdapter(BaseAdapter):
    """Adapter for GitHub: URL detection, page parsing and content download.

    Parses repository / single-file pages scraped from github.com into
    metadata dicts, and downloads file contents or whole-repository ZIP
    archives through the GitHub REST API.
    """

    # Anchored so only URLs whose host actually is github.com match —
    # not any string that merely contains "github.com" somewhere.
    _DOMAIN_PATTERN = re.compile(
        r"^(?:https?://)?(?:www\.)?github\.com(?:[/?#]|$)", re.IGNORECASE
    )
    _API_BASE = "https://api.github.com"
    # Display labels keyed by the aria-label of a file row's icon.
    # NOTE: the values are runtime strings matched in _gen_file_tree
    # (e.g. item['type'] == '目录') — do not translate or alter them.
    _CONTENT_TYPES = {
        "file": "文件",
        "dir": "目录",
        "symlink": "符号链接"
    }
    # Seconds before a direct API request is abandoned; without a timeout
    # requests.get() can hang indefinitely.
    _REQUEST_TIMEOUT = 30

    def __init__(self):
        self.middleware = AntiSpiderMiddleware()
        self.middleware.set_proxy_pool([
            'http://proxy1.example.com:8080',
            'http://proxy2.example.com:8080'
        ])
        # parse() branches on this attribute; initialise it so an unset
        # URL reads as "not a file page" instead of raising AttributeError.
        # Presumably assigned by the fetch path in BaseAdapter — confirm.
        self.current_url: Optional[str] = None

    def detect(self, url: str) -> bool:
        """Return True if *url* points at github.com.

        Recognised URL forms include:
            https://github.com/{owner}/{repo}
            https://github.com/{owner}/{repo}/blob/{branch}/path/to/file
        """
        return bool(self._DOMAIN_PATTERN.search(url))

    def parse(self, html: str) -> Dict:
        """Parse a GitHub HTML page into a metadata dict.

        Always returns repository-level metadata; when ``self.current_url``
        is a ``/blob/`` (single-file) URL the file's content and raw URL are
        added and ``content_type`` becomes ``'file'``.
        """
        soup = BeautifulSoup(html, 'lxml')

        # NOTE(review): _extract_description / _extract_stars /
        # _extract_license / _extract_file_content / _get_raw_url are not
        # defined in this class — presumably inherited from BaseAdapter;
        # confirm they exist there.
        metadata = {
            'title': self._extract_title(soup),
            'owner': self._extract_owner(soup),
            'description': self._extract_description(soup),
            'stars': self._extract_stars(soup),
            'files': self._extract_file_tree(soup),
            'license': self._extract_license(soup),
            'content_type': 'repository'
        }

        # Single-file pages additionally carry the rendered content and a
        # raw-download URL. Guard against current_url still being unset.
        if self.current_url and '/blob/' in self.current_url:
            metadata.update({
                'content_type': 'file',
                'content': self._extract_file_content(soup),
                'raw_url': self._get_raw_url(soup)
            })

        return metadata

    def download(self, url: str) -> bytes:
        """Download a single file (``/blob/`` URL) or a repository archive."""
        if '/blob/' in url:
            # NOTE(review): _download_file_content is not defined in this
            # class — presumably inherited from BaseAdapter; confirm.
            return self._download_file_content(url)
        return self._download_repo_archive(url)

    # -- Private helpers --
    def _extract_title(self, soup) -> str:
        """Repository title from the page heading; '' when not found."""
        heading = soup.find('h1', {'class': 'repohead'})
        if heading is None:
            # Selector missed (layout change / unexpected page) — degrade
            # gracefully instead of raising AttributeError on None.
            return ''
        return heading.get_text(strip=True).replace('\n', ' ')

    def _extract_owner(self, soup) -> Dict:
        """Owner name and profile URL.

        GitHub tags the owner link with data-hovercard-type 'organization'
        for org-owned repos and 'user' for personal repos — check both.
        Returns empty strings when no owner link is present.
        """
        owner_link = (
            soup.find('a', {'data-hovercard-type': 'organization'})
            or soup.find('a', {'data-hovercard-type': 'user'})
        )
        if owner_link is None:
            return {'name': '', 'url': ''}
        return {
            'name': owner_link.get_text(strip=True),
            'url': owner_link['href']
        }

    def _extract_file_tree(self, soup) -> list:
        """Extract the file/directory listing from a repository page.

        Rows without a navigation link (e.g. header rows) are skipped;
        rows whose icon lacks an aria-label fall back to type 'unknown'.
        """
        entries = []
        for row in soup.find_all('div', {'role': 'row'}):
            link = row.find('a', {'class': 'js-navigation-open'})
            if link is None:
                continue  # not a file row
            icon = row.find('svg')
            label = icon.get('aria-label') if icon is not None else None
            entries.append({
                'name': link.text,
                'type': self._CONTENT_TYPES.get(label, 'unknown')
            })
        return entries

    def _download_repo_archive(self, url: str) -> bytes:
        """Download the repository as a ZIP archive via the zipball API.

        Raises:
            ValueError: if *url* does not contain an owner/repo path.
        """
        repo_match = re.search(r"github\.com/([^/?#]+)/([^/?#]+)", url)
        if repo_match is None:
            raise ValueError(f"Not a recognizable GitHub repository URL: {url}")
        owner = repo_match.group(1)
        repo = repo_match.group(2)
        # Clone-style URLs carry a .git suffix the API does not accept.
        if repo.endswith('.git'):
            repo = repo[:-4]
        api_url = f"{self._API_BASE}/repos/{owner}/{repo}/zipball"
        response = self.middleware.safe_request(api_url)
        return response.content

    # -- Platform-specific features --
    def get_stargazers(self, owner: str, repo: str) -> list:
        """Return the list of stargazers (sample API integration).

        Raises:
            requests.HTTPError: on a non-2xx API response, instead of
                silently returning GitHub's error payload as data.
        """
        api_url = f"{self._API_BASE}/repos/{owner}/{repo}/stargazers"
        response = requests.get(api_url, timeout=self._REQUEST_TIMEOUT)
        response.raise_for_status()
        return response.json()

    def render_readme(self, owner: str, repo: str) -> str:
        """Return the repository README rendered as HTML.

        Raises:
            requests.HTTPError: on a non-2xx API response.
        """
        api_url = f"{self._API_BASE}/repos/{owner}/{repo}/readme"
        response = requests.get(
            api_url,
            headers={'Accept': 'application/vnd.github.html'},
            timeout=self._REQUEST_TIMEOUT,
        )
        response.raise_for_status()
        return response.text

    # -- UI integration --
    def format_preview(self, metadata: Dict) -> str:
        """Render a metadata dict as the preview HTML snippet."""
        preview_html = f"""
        <div class="github-preview">
            <h2>{metadata['title']}</h2>
            <div class="stats">
                <span class="stars">★ {metadata['stars']}</span>
                <span class="license">📜 {metadata['license']}</span>
            </div>
            <p class="description">{metadata['description']}</p>
            {"".join(self._gen_file_tree(metadata['files']))}
        </div>
        """
        return preview_html

    def _gen_file_tree(self, files: list) -> list:
        """Render one HTML fragment per file-tree entry."""
        return [f"""
        <div class="file-item {item['type']}">
            <span class="icon">{'📁' if item['type']=='目录' else '📄'}</span>
            {item['name']}
        </div>
        """ for item in files]