#coding:utf-8
import scrapy
import json
import unicodedata
import re
import computeDaysByNums
import time

class QuotesSpider(scrapy.Spider):
    """Search openhub.net (Black Duck Open Hub) for a project and tally the
    KudoRank level of every contributor who committed within the last three
    years.

    Run with:  scrapy crawl contributor_level -a tag=<search term>
    Yields a single dict mapping KudoRank level (1-9) -> contributor count
    once the last contributors page has been parsed.
    """
    name = "contributor_level"
    # KudoRank level -> number of recent contributors at that level.
    # NOTE(review): class-level mutable dict is shared across instances;
    # parse() resets it before counting starts, so a single crawl is safe.
    level_num = {}

    def start_requests(self):
        # Search Open Hub for the project name passed via "-a tag=...".
        url = 'https://www.openhub.net/'
        tag = getattr(self, 'tag', None)
        if tag is not None:
            url = url + 'p?query=' + tag + '&sort=relevance'
            yield scrapy.Request(url, self.parse_search)

    def parse_search(self, response):
        # Follow the first search hit to its project page.
        # NOTE(review): the element id "project_603413" is hard-coded to one
        # specific project; a generic selector over the results list would be
        # needed to make arbitrary tags work — confirm intended scope.
        first_one = response.xpath('//*[@id="project_603413"]/h2/a/@href').extract_first()
        if first_one is not None:
            first_one = response.urljoin(first_one)
            yield scrapy.Request(first_one, self.parse)

    def get_contributors_level(self, response):
        """Parse one page of the contributors table, count contributors per
        KudoRank level (recent committers only), then follow pagination; on
        the last page yield the accumulated counts."""
        resset = response.xpath('//*[@id="page_contents"]/table/tbody/tr').extract()
        for res in resset:
            # Normalize the unicode row down to plain ASCII; round-trip
            # through bytes so this also works on Python 3, where encode()
            # returns bytes (the original then crashed in str.replace).
            row = unicodedata.normalize('NFKD', res).encode('ascii', 'ignore').decode('ascii')
            # Strip newlines so the regexes below can match across the row.
            row = row.replace('\n', '')
            # Contributor's KudoRank level (a single digit in the markup).
            level_match = re.match(r'.*KudoRank (\d).*', row)
            # Date of the contributor's latest commit.
            date_match = re.match(r'.*date\" title=\"(.*)\"', row)
            if level_match is None or date_match is None:
                # Row without the expected markup (header/spacer rows):
                # skip it instead of raising AttributeError on None.
                continue
            level = int(level_match.group(1))
            # Days elapsed since the last commit (project helper).
            # Local renamed from "time" to avoid shadowing the time module.
            time_diff = computeDaysByNums.githubTime(date_match.group(1))
            # Only count contributors active within the last three years.
            if time_diff <= 365 * 3:
                # .get() guards against levels outside the 1-9 range that
                # parse() initialized (e.g. the regex capturing "0" from
                # a hypothetical "KudoRank 10").
                self.level_num[level] = self.level_num.get(level, 0) + 1
        # The last <li> of the pager holds the next-page link; xpath last()
        # selects the final element and @href reads the attribute value.
        next_page = response.xpath('//*[@id="page_contents"]/div[2]/ul/li[last()]/a/@href').extract_first()
        # Compare by value, not identity: "is not" on two equal strings is
        # almost always True and would re-request the last page forever.
        if next_page is not None and next_page != response.url:
            yield scrapy.Request(response.urljoin(next_page), self.get_contributors_level)
        else:
            yield self.level_num

    def parse(self, response):
        # Reset the per-level counters (KudoRank levels 1..9), then crawl
        # the contributor list sorted by kudo position.
        for i in range(1, 10):
            self.level_num[i] = 0
        yield scrapy.Request(response.url + '/contributors?sort=kudo_position', self.get_contributors_level)


