#coding:utf-8
import scrapy
import json
from urlparse import urlparse
import computeDaysByNums
import csvout
import sys
sys.path.append('../')
import toDB

class QuotesSpider(scrapy.Spider):
    """Count a GitHub repository's recent issue and pull-request activity.

    Starting from the repository front page, the spider fans out to the
    ``/issues`` and ``/pulls`` tabs, tallies the open and closed issues whose
    last-activity timestamp falls within ``RECENT_DAYS`` days, and reads the
    open/closed pull-request counts off the toolbar.  Every total is appended
    to a CSV via ``csvout``, inserted into the database via ``toDB``, and
    yielded as a Scrapy item.

    Command-line arguments (``scrapy crawl github -a ...``):
        tag: repository URL to crawl, e.g. ``https://github.com/alibaba/dubbo``
        tid: project id recorded with every DB insert
    """
    name = "github"
    # Running tallies, accumulated across paginated issue listings.  GitHub
    # lists issues newest-first, so counting stops at the first entry that is
    # older than the window.
    open_issues_num = 0
    closed_issues_num = 0
    total_issues_num = 0
    tid = 0  # overridden by -a tid=... (Scrapy stores -a arguments on the instance)
    # Window, in days, that counts as "recent" activity.
    RECENT_DAYS = 90

    # CSS path of an issue row's <relative-time datetime="..."> stamp, per the
    # GitHub markup sketched below:
    #   <div class="d-table table-fixed width-full Box-row--drag-hide">
    #     <div class="float-left col-9 p-2 lh-condensed">
    #       <div class="mt-1 text-small text-gray">
    #         <span class="opened-by">
    #           <relative-time datetime="2017-04-26T10:11:00Z" ...>
    DATES_CSS = ("div[class='d-table table-fixed width-full Box-row--drag-hide']"
                 " div div span relative-time::attr(datetime)")
    # "Next" link of the paginator:
    #   <div class="paginate-container"><div class="pagination">
    #     <a class="next_page" rel="next" href="/...?page=2...">Next</a>
    NEXT_PAGE_CSS = "div[class='paginate-container'] div a[class='next_page']::attr(href)"

    def start_requests(self):
        """Kick off the crawl at the repository URL passed via ``-a tag=...``."""
        tag = getattr(self, 'tag', None)
        # The original also read self.tid into an unused local here; Scrapy
        # already stores -a tid=... on the instance, so that read is dropped.
        if tag is not None:
            yield scrapy.Request(tag, self.parse)

    def _recent_count(self, dates):
        """Return ``(count, hit_older)`` for the datetime strings in *dates*.

        ``count`` is how many entries fall within ``RECENT_DAYS``;
        ``hit_older`` is True when an older entry was encountered, which means
        pagination can stop (listings are newest-first).
        """
        count = 0
        for date in dates:
            if computeDaysByNums.githubTime(date) > self.RECENT_DAYS:
                return count, True
            count += 1
        return count, False

    def _report_open(self, response):
        """Persist the open-issue tally (CSV + DB) and return the item dict."""
        csvout.list2csv(['open_issues_num', self.open_issues_num, response.url])
        print(toDB.insert_project_info(self.tid, 'activity_iss_open',
                                       self.open_issues_num, response.url))
        return {'open_issues_num': self.open_issues_num}

    def _report_closed(self, response):
        """Persist closed and total tallies (CSV + DB) and return the item dict.

        NOTE(review): the total assumes the open-issue pages have been fully
        counted by the time the closed listing finishes; Scrapy schedules
        requests asynchronously, so the open tally may still be in progress —
        confirm the ordering in practice.
        """
        total = self.closed_issues_num + self.open_issues_num
        for row in (['closed_issues_num', self.closed_issues_num, response.url],
                    ['total_issues_num', total, response.url]):
            csvout.list2csv(row)
        print(toDB.insert_project_info(self.tid, 'activity_iss_close',
                                       self.closed_issues_num, response.url))
        print(toDB.insert_project_info(self.tid, 'activity_issues',
                                       total, response.url))
        return {'closed_issues_num': self.closed_issues_num,
                'total_issues_num': total}

    def get_issues_num(self, response):
        """Count open issues active within RECENT_DAYS, following pagination."""
        # Also schedule the "Closed" tab.  Every open-issues page carries this
        # link; Scrapy's duplicate-request filter keeps it from being crawled
        # more than once.
        closed_page = response.xpath('//*[@id="js-issues-toolbar"]/div/div[1]/a[2]/@href').extract_first()
        if closed_page is not None:
            yield scrapy.Request(response.urljoin(closed_page), self.get_closed_issues_num)

        dates = response.css(self.DATES_CSS).extract()
        added, hit_older = self._recent_count(dates)
        self.open_issues_num += added
        if hit_older:
            # An entry older than the window ends the scan for this tab.
            # (Spiders must yield Request/item/dict/None, so the result is
            # reported here and the generator simply returns.)
            yield self._report_open(response)
            return

        next_page = response.css(self.NEXT_PAGE_CSS).extract_first()
        if next_page is not None:
            yield scrapy.Request(response.urljoin(next_page), self.get_issues_num)
        else:
            # No further pages: everything listed was recent.
            yield self._report_open(response)

    def get_closed_issues_num(self, response):
        """Count closed issues active within RECENT_DAYS, following pagination."""
        dates = response.css(self.DATES_CSS).extract()
        added, hit_older = self._recent_count(dates)
        self.closed_issues_num += added
        if hit_older:
            yield self._report_closed(response)
            return

        next_page = response.css(self.NEXT_PAGE_CSS).extract_first()
        if next_page is not None:
            yield scrapy.Request(response.urljoin(next_page), self.get_closed_issues_num)
        else:
            # No further pages: everything listed was recent.
            yield self._report_closed(response)

    def get_pulls_num(self, response):
        """Read the open/closed pull-request counts off the /pulls toolbar."""
        open_text = response.xpath('//*[@id="js-issues-toolbar"]/div/div[1]/a[1]/text()').extract()
        closed_text = response.xpath('//*[@id="js-issues-toolbar"]/div/div[1]/a[2]/text()').extract()
        # extract() yields e.g. [u'\n      ', u'\n      3 Open\n    ']; pull
        # the digits out of the second element.  This form works on both
        # Python 2 and 3, unlike the previous filter(unicode.isdigit, ...).
        open_num = int(''.join(ch for ch in open_text[1] if ch.isdigit()))
        closed_num = int(''.join(ch for ch in closed_text[1] if ch.isdigit()))

        for row in (['pulls_open_num', open_num, response.url],
                    ['pulls_closed_num', closed_num, response.url],
                    ['pulls_total_num', open_num + closed_num, response.url]):
            csvout.list2csv(row)
        print(toDB.insert_project_info(self.tid, 'activity_req_open', open_num, response.url))
        print(toDB.insert_project_info(self.tid, 'activity_req_close', closed_num, response.url))
        yield {'pulls_open_num': open_num,
               'pulls_closed_num': closed_num,
               'pulls_total_num': open_num + closed_num}

    def parse(self, response):
        """Fan out from the repository front page to its issue and PR tabs.

        (An earlier watch/star/fork scraping pass was already disabled as
        commented-out code and has been removed; recover it from version
        control if it needs reviving.)
        """
        yield scrapy.Request(response.url + '/issues', self.get_issues_num)
        yield scrapy.Request(response.url + '/pulls', self.get_pulls_num)
