#coding:utf-8
import scrapy
import json
import re
import unicodedata
import time
import os
import github_tag_csvout

class QuotesSpider(scrapy.Spider):
    """Spider that searches GitHub for a repository matching the ``tag``
    spider argument, opens the first search hit, walks its tag list and
    records each tag's commit date.

    Every (version, date, url) row is appended to CSV via
    ``github_tag_csvout.list2csv``; once dates for all tags have been
    collected, the accumulated ``{version: 'YYYYMMDD'}`` mapping is yielded
    as the final item.
    """
    name = "github_tag"
    # version -> 'YYYYMMDD' date, filled in as tag pages are parsed.
    sett = {}
    # Number of tag links discovered on the tags page; when len(sett)
    # reaches this, the crawl is considered complete.
    length = 0

    def start_requests(self):
        """Issue a GitHub search request for the ``tag`` spider argument.

        If no ``tag`` argument was supplied (``scrapy crawl github_tag -a
        tag=...``), no request is made and the spider finishes immediately.
        """
        url = 'https://github.com/'
        tag = getattr(self, 'tag', None)
        if tag is not None:
            url = url + 'search?utf8=&q=' + tag + '&type='
            yield scrapy.Request(url, self.parse_search)

    def parse_search(self, response):
        """Follow the first repository link in the search-result list."""
        first_one = response.css("ul[class='repo-list js-repo-list'] div div a::attr('href')").extract_first()
        if first_one is not None:
            yield scrapy.Request(response.urljoin(first_one), self.parse)

    def parse_unsolved_date(self, response):
        """Parse the lazily-loaded fragment carrying a tag's commit date.

        Reached from :meth:`parse_date` when the tag page itself had not yet
        rendered the ``relative-time`` element; the fragment URL serves just
        that element. ``response.meta['version']`` carries the tag name.
        """
        res = response.xpath('/html/body/div/span[1]/span/relative-time/@datetime').extract_first()
        print('unsolved')
        if res is None:
            # Fragment did not contain the timestamp either; nothing to record.
            return
        # '2017-05-03T...' -> '20170503'
        date = res[:10].replace('-', '')
        li = [response.meta['version'], date, response.url]
        github_tag_csvout.list2csv(li)
        self.sett[response.meta['version']] = date
        if self.length == len(self.sett):
            # All tags resolved: emit the full mapping as the final item.
            yield self.sett

    def parse_date(self, response):
        """Extract the commit date from a single tag page.

        If the ``relative-time`` element is not present (the page loads it
        lazily through an ``<include-fragment>``), follow the fragment's
        ``src`` and finish in :meth:`parse_unsolved_date` instead.
        """
        res = response.xpath('//*[@id="js-repo-pjax-container"]/div[2]/div[1]/div[6]/span[1]/span/relative-time/@datetime').extract_first()
        # The tag/version name is the last path segment of the tag page URL.
        version = os.path.basename(response.url)
        if res is None:
            # Date block not rendered yet; request the lazy fragment and
            # carry the version (and is_last flag) along in meta.
            newurl = response.xpath('//*[@id="js-repo-pjax-container"]/div[2]/div[1]/include-fragment[1]/@src').extract_first()
            if newurl is not None:
                yield scrapy.Request(
                    response.urljoin(newurl),
                    meta={'version': version, 'is_last': response.meta['is_last']},
                    callback=self.parse_unsolved_date)
        else:
            # '2017-05-03T...' -> '20170503'
            date = res[:10].replace('-', '')
            li = [version, date, response.url]
            github_tag_csvout.list2csv(li)
            self.sett[version] = date
            if self.length == len(self.sett):
                # All tags resolved: emit the full mapping as the final item.
                yield self.sett

    def parse(self, response):
        """Collect all tag links from the repository's tags page and
        schedule a request for each.

        NOTE: the page layout changed at some point (div[4] -> div[5]); the
        xpath below targets the newer layout.
        """
        res = response.xpath('//*[@id="js-repo-pjax-container"]/div[2]/div[1]/div[5]/div[3]/div/div/div[4]/div[1]/a/@href').extract()

        self.length = len(res)
        last_index = len(res) - 1
        for i, item in enumerate(res):
            # Compare by index, not by value, so duplicate hrefs cannot
            # mark more than one request as last.
            is_last = 1 if i == last_index else 0
            yield scrapy.Request(response.urljoin(item),
                                 meta={'is_last': is_last},
                                 callback=self.parse_date)
