import scrapy
from scrapy import Selector, Request
from gitee_scrapy.items import GiteeScrapyItem


class GiteeSpider(scrapy.Spider):
    """Crawl Gitee's "explore all" listing (first 100 pages).

    For every project entry on a listing page, build a ``GiteeScrapyItem``
    with author, repository name, description, language and repository URL,
    then follow the repository detail page to also capture the clone URL
    before yielding the item.
    """

    name = "gitee"
    allowed_domains = ["gitee.com"]

    def start_requests(self):
        """Yield one request per explore page (pages 1..100)."""
        for page in range(1, 101):
            yield Request(url=f'https://gitee.com/explore/all?page={page}')

    def parse(self, response):
        """Parse a listing page and schedule a detail request per project.

        Skips entries whose title attribute is missing or not in the
        expected ``author/repository`` form instead of crashing.
        """
        for entry in response.css('.content'):
            # The anchor's title attribute is "author/repository".
            # Guard against .get() returning None or a title without a
            # slash — the original code raised AttributeError/IndexError here.
            raw_title = entry.css('.project-title a::attr(title)').get()
            if not raw_title or '/' not in raw_title:
                continue
            author, _, repository = raw_title.partition('/')

            project = GiteeScrapyItem()
            # Author (owner) of the repository
            project['author'] = author.strip()
            # Repository name
            project['repository'] = repository.strip()
            # Project description
            project['desc'] = entry.css('.project-desc::text').get()
            # Language classification
            project['language'] = entry.css(
                '.project-item-bottom .d-align-center a::attr(title)'
            ).get()
            # The href is site-relative (e.g. "/owner/repo"); urljoin avoids
            # the double slash that 'https://gitee.com/' + href produced.
            href = entry.css('.project-title a::attr(href)').get() or ''
            project['repository_url'] = response.urljoin(href)

            # Follow the detail page to fill in the clone URL.
            yield Request(
                url=project['repository_url'],
                callback=self.parse_detail,
                cb_kwargs={'project': project},
            )

    def parse_detail(self, response, **kwargs):
        """Extract the clone URL from a repository detail page, yield the item."""
        project = kwargs['project']
        # Value of the clone-URL input box on the repository page.
        project['clone_url'] = response.css(
            'input#project_url_clone::attr(value)'
        ).get()
        yield project
