#coding:utf-8
import scrapy
import simplejson
from urlparse import urlparse
import computeDaysByNums
import csvout

class QuotesSpider(scrapy.Spider):
    """Spider that searches GitHub for a tag and saves the top repository
    URLs to ``github_url.json``.

    Run with: ``scrapy crawl github_url -a tag=<search-term>``
    """
    name = "github_url"
    # Issue counters; not updated in this file — presumably maintained by
    # code elsewhere in the project (verify against other spiders/modules).
    open_issues_num = 0
    closed_issues_num = 0
    total_issues_num = 0

    def start_requests(self):
        """Build the GitHub search URL from the ``tag`` spider argument.

        Yields a single request for the search-results page, or nothing
        at all when no ``tag`` argument was supplied.
        """
        base = 'https://github.com/'
        tag = getattr(self, 'tag', None)
        if tag is not None:
            url = base + 'search?utf8=&q=' + tag + '&type='
            yield scrapy.Request(url, self.parse_search)

    def parse_search(self, response):
        """Collect up to the first three repository links from the search
        results page and write them to ``github_url.json`` as JSON.
        """
        hrefs = response.xpath(
            '//*[@id="js-pjax-container"]/div[1]/div/div[1]/ul/div/div[1]/h3/a/@href'
        ).extract()
        # .extract() always returns a list (possibly empty), never None,
        # so the previous `is not None` guard was dead — iterate directly.
        github = []
        for href in hrefs[:3]:
            repo_name = href[1:]  # strip the leading '/' to get "owner/repo"
            github.append({"name": repo_name, "path": response.urljoin(href)})
        self.save_to_file('github_url.json', simplejson.dumps(github))

    def save_to_file(self, file_name, contents):
        """Write ``contents`` to ``file_name``, overwriting any existing file.

        Uses a context manager so the handle is closed even if the
        write raises (the original leaked the handle on error).
        """
        with open(file_name, 'w') as fh:
            fh.write(contents)