# -*- coding: utf-8 -*-
import scrapy
from shiyanlou.items import RepositoryItem


class GithubSpider(scrapy.Spider):
    """Crawl the repository list of the GitHub user ``shiyanlou``.

    Pages 1-4 of the user's "Repositories" tab are parsed for repository
    name and last-update time; each repository page is then followed to
    collect commit, branch and release counts.
    """

    name = 'github'
    # Sequential counter emitted as each item's ``id`` field.
    id = 0

    @property
    def start_urls(self):
        # Pages 1-4 of the repositories tab (range upper bound is exclusive).
        return (
            'https://github.com/shiyanlou?page={}&tab=repositories'.format(i)
            for i in range(1, 5)
        )

    def parse(self, response):
        """Parse one listing page and schedule a request per repository.

        Yields a ``scrapy.Request`` per repository with the half-filled
        ``RepositoryItem`` attached via ``request.meta['item']``.
        """
        for course in response.css('div#user-repositories-list ul li'):
            self.id += 1
            item = RepositoryItem()
            item['id'] = self.id
            # extract_first() returns None when nothing matches; default to
            # '' so .strip() cannot raise AttributeError on markup changes.
            item['name'] = course.css('h3 a::text').extract_first(default='').strip()
            item['update_time'] = course.xpath('.//relative-time/@datetime').extract_first()
            course_url = response.urljoin(course.css('h3 a::attr(href)').extract_first())
            request = scrapy.Request(course_url, callback=self.parse_other)
            request.meta['item'] = item
            yield request

    def parse_other(self, response):
        """Fill in commit/branch/release counts on the passed-along item."""
        item = response.meta['item']
        # Raw strings for regex patterns avoid invalid-escape warnings.
        item['commits'] = ''.join(
            response.css('ul.numbers-summary li.commits span::text').re(r"\d+"))
        # Branches/releases have no dedicated CSS class; select by position
        # (corresponds to selector #js-repo-pjax-container > li:nth-child(2)).
        item['branches'] = ''.join(
            response.css('ul.numbers-summary li:nth-child(2) span::text').re(r"\d+"))
        item['releases'] = ''.join(
            response.css('ul.numbers-summary li:nth-child(3) span::text').re(r"\d+"))
        yield item