#!/usr/bin/env python3
"""
GitHub Trending 爬虫脚本
用于爬取 https://github.com/trending 页面的热门仓库数据
"""

import requests
from bs4 import BeautifulSoup
import json
import time
from datetime import datetime
from typing import List, Dict, Optional

class GitHubTrendingScraper:
    """Scraper for https://github.com/trending.

    Downloads the trending page (optionally filtered by language and time
    range) and parses each repository entry into a plain dict.
    """

    def __init__(self):
        # Base listing URL; a language slug may be appended as a path segment.
        self.base_url = "https://github.com/trending"
        # Browser-like headers reduce the chance of GitHub serving a
        # degraded or blocked response to an obvious bot user agent.
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
            'Accept-Language': 'en-US,en;q=0.5',
            'Accept-Encoding': 'gzip, deflate',
            'Connection': 'keep-alive',
        }

    def scrape_trending_repos(self, language: str = "", since: str = "daily", limit: int = 50) -> List[Dict]:
        """Scrape GitHub Trending repository data.

        Args:
            language: optional programming-language filter (URL path segment,
                e.g. "python"; empty string means all languages).
            since: time range — "daily", "weekly" or "monthly".
            limit: maximum number of repositories to return (default 50).

        Returns:
            A list of repository dicts; empty list on request or parse failure.
        """
        url = f"{self.base_url}/{language}" if language else self.base_url
        params = {"since": since}

        try:
            response = requests.get(url, headers=self.headers, params=params, timeout=15)
            response.raise_for_status()

            soup = BeautifulSoup(response.content, 'html.parser')
            repos = []

            # Each repository entry is an <article class="Box-row">.
            repo_elements = soup.find_all('article', class_='Box-row')

            print(f"找到 {len(repo_elements)} 个仓库元素")

            for repo_element in repo_elements:
                if limit and len(repos) >= limit:
                    break

                repo_data = self._parse_repo_element(repo_element)
                if repo_data:
                    # Rank is the 1-based position among successfully parsed entries.
                    repo_data['rank'] = len(repos) + 1
                    repos.append(repo_data)

            print(f"成功解析 {len(repos)} 个仓库")
            return repos

        except requests.RequestException as e:
            print(f"请求失败: {e}")
            return []
        except Exception as e:
            # Parsing errors: log with traceback but never propagate, so one
            # bad page does not crash batch runs (get_all_languages_trending).
            print(f"解析失败: {e}")
            import traceback
            traceback.print_exc()
            return []

    def _parse_repo_element(self, element) -> Optional[Dict]:
        """Parse one <article class="Box-row"> element into a repo dict.

        Returns None when the element lacks the expected name link or when
        any unexpected parse error occurs (logged, not raised).
        """
        try:
            # Repository name and link live in <h2 class="h3"><a href="/owner/repo">.
            name_element = element.find('h2', class_='h3')
            if not name_element:
                return None

            name_link = name_element.find('a')
            if not name_link:
                return None

            repo_name = name_link.get_text(strip=True)
            repo_url = f"https://github.com{name_link.get('href', '')}"

            # Description paragraph (may be absent for new repos).
            desc_element = element.find('p', class_='col-9')
            description = desc_element.get_text(strip=True) if desc_element else ""

            # Primary programming language, if GitHub detected one.
            lang_element = element.find('span', {'itemprop': 'programmingLanguage'})
            language = lang_element.get_text(strip=True) if lang_element else "Unknown"

            # Star and fork counts are anchor texts linking to the
            # stargazers / network pages.
            stars_element = element.find('a', href=lambda x: x and '/stargazers' in x)
            stars = self._extract_number(stars_element.get_text(strip=True)) if stars_element else 0

            forks_element = element.find('a', href=lambda x: x and '/network/members' in x)
            forks = self._extract_number(forks_element.get_text(strip=True)) if forks_element else 0

            # Stars gained today: scan ALL d-inline-block spans for the one
            # containing "stars today" (the first such span is not always it,
            # which previously left this stuck at 0).
            stars_today = 0
            for span in element.find_all('span', class_='d-inline-block'):
                span_text = span.get_text(strip=True)
                if 'stars today' in span_text:
                    stars_today = self._extract_number(span_text)
                    break

            # Contributor avatars carry data-hovercard-type="user"; keep at
            # most five usernames (href is "/username", so strip the slash).
            contributors = []
            contributor_elements = element.find_all('a', {'data-hovercard-type': 'user'})
            for contributor in contributor_elements[:5]:
                contributors.append(contributor.get('href', '').replace('/', ''))

            return {
                'name': repo_name,
                'description': description,
                'language': language,
                'stars': stars,
                'forks': forks,
                'stars_today': stars_today,
                'url': repo_url,
                'contributors': contributors,
                'scraped_at': datetime.now().isoformat()
            }

        except Exception as e:
            print(f"解析仓库元素失败: {e}")
            return None

    def _extract_number(self, text: str) -> int:
        """Extract the first integer from *text*, treating commas as thousands
        separators (e.g. "12,345 stars today" -> 12345). Returns 0 when no
        digits are present.
        """
        import re
        match = re.search(r'\d[\d,]*', text)
        if match:
            return int(match.group().replace(',', ''))
        return 0

    def save_to_json(self, data: List[Dict], filename: Optional[str] = None):
        """Save *data* to a UTF-8 JSON file.

        Args:
            data: repository dicts to serialize.
            filename: target path; when None, a timestamped name
                "github_trending_YYYYmmdd_HHMMSS.json" is generated.
        """
        if not filename:
            timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
            filename = f"github_trending_{timestamp}.json"

        with open(filename, 'w', encoding='utf-8') as f:
            json.dump(data, f, ensure_ascii=False, indent=2)

        # Fixed: previously printed a literal "(unknown)" placeholder
        # instead of the actual file path.
        print(f"数据已保存到: {filename}")

    def get_all_languages_trending(self) -> Dict[str, List[Dict]]:
        """Scrape trending data for a fixed set of popular languages.

        Returns:
            Mapping of language slug (or "all" for the unfiltered page) to
            its list of repository dicts.
        """
        languages = [
            "",  # all languages
            "javascript",
            "typescript",
            "python",
            "java",
            "go",
            "rust",
            "c++",
            "c#",
            "php"
        ]

        all_data = {}
        for lang in languages:
            print(f"正在爬取 {lang or '全部语言'} 的 Trending 数据...")
            # Fixed: was self.scraper.scrape_trending_repos(...) — the class
            # has no "scraper" attribute, so this always raised AttributeError.
            data = self.scrape_trending_repos(language=lang)
            all_data[lang or 'all'] = data
            time.sleep(1)  # throttle to avoid hammering GitHub
        
        return all_data

def main():
    """Entry point: scrape today's trending repos, persist them to JSON,
    and print a short preview of the top five.
    """
    scraper = GitHubTrendingScraper()

    print("开始爬取 GitHub Trending 数据...")

    trending_repos = scraper.scrape_trending_repos()

    # Guard clause: bail out early when nothing was scraped.
    if not trending_repos:
        print("爬取失败，请检查网络连接或网站结构是否发生变化")
        return

    print(f"成功爬取 {len(trending_repos)} 个热门仓库")

    # Persist the full result set before showing the preview.
    scraper.save_to_json(trending_repos)

    print("\n前5个热门仓库:")
    for rank, repo in enumerate(trending_repos[:5], start=1):
        print(f"{rank}. {repo['name']}")
        print(f"   描述: {repo['description']}")
        print(f"   语言: {repo['language']}")
        print(f"   星标: {repo['stars']} (+{repo['stars_today']} today)")
        print(f"   Fork: {repo['forks']}")
        print(f"   URL: {repo['url']}")
        print()


if __name__ == "__main__":
    main()
