#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
@Project ：python_learning 
@File ：github_spider.py
@IDE  ：PyCharm 
@Author ：李涵彬
@Date ：2025/1/7 下午4:19 
"""

import scrapy
from typing import Any, Dict, Iterator, List, Optional


class GithubSpider(scrapy.Spider):
	"""Scrape a GitHub user's repository listing, following pagination.

	Yields one dict per repository row with the keys ``name``,
	``description``, ``language`` and ``stars``, then follows the
	pagination link (if any) back into :meth:`parse`.
	"""

	name: str = 'github'
	allowed_domains: List[str] = ['github.com']
	# Replace "neuks" with the actual username to crawl.
	start_urls: List[str] = ['https://github.com/neuks?tab=repositories']

	@staticmethod
	def _first_text(selector: scrapy.Selector, query: str, default: str = '') -> str:
		"""Return the stripped text of the first node matching *query*.

		Returns *default* when the selector matches nothing, so callers
		never hit ``AttributeError`` from ``None.strip()``.
		"""
		text: Optional[str] = selector.css(query).get()
		return text.strip() if text else default

	def parse(self, response: scrapy.http.Response) -> Iterator[Any]:
		"""Parse one repository-list page.

		Yields an item dict per repository, then a follow-up Request
		for the next page when a pagination link exists.
		"""
		# One <li> row per repository in the listing.
		# NOTE(review): these CSS classes mirror a past github.com layout —
		# verify against the live markup before trusting the output.
		for repo in response.css('li.col-12.d-flex.width-full.py-4.border-bottom'):
			item: Dict[str, Any] = {
				# All four fields go through _first_text so a missing node
				# degrades to the default instead of crashing (the original
				# 'name' lookup had no None guard).
				'name': self._first_text(repo, 'a.mr-3::text'),
				'description': self._first_text(repo, 'p.f6.text-gray.mt-2::text'),
				'language': self._first_text(repo, 'span.mr-3::text'),
				'stars': self._first_text(repo, 'a.social-count::text', default='0'),
			}
			yield item

		# Follow pagination; .get() returns None when no link matches.
		next_page: Optional[str] = response.css('a.btn.btn-outline.Btn-sm::attr(href)').get()
		if next_page:
			yield response.follow(next_page, self.parse)
