#coding:utf-8
import scrapy
import simplejson as json
import unicodedata
import re
import computeDaysByNums
import csvout
import sys
sys.path.append('../')
import toDB

class QuotesSpider(scrapy.Spider):
    """Scrape project activity statistics from Black Duck OpenHub.

    Start it with the target project page and its database id, e.g.::

        scrapy crawl openhub -a tag=<project-page-url> -a tid=<project-id>

    ``parse`` locates the chart data-source URLs on the project page and
    dispatches one request per statistic; each ``get_*`` callback parses the
    chart's JSON series, writes a CSV row via ``csvout`` and (for commits /
    contributors) persists the value through ``toDB``.
    """

    name = "openhub"
    # NOTE(review): level_num appears unused within this file -- kept in case
    # external code relies on it.
    level_num = {}
    # Project id forwarded to toDB.insert_project_info; normally overridden
    # per run via the ``-a tid=...`` spider argument.
    tid = 0

    def start_requests(self):
        # 'tag' is the OpenHub project page URL supplied on the command line
        # via ``-a tag=...``; without it there is nothing to crawl.
        tag = getattr(self, 'tag', None)
        if tag is not None:
            yield scrapy.Request(tag, callback=self.parse)

    def _load_series(self, response):
        """Decode the chart JSON body and return its first data series:
        a list of ``[timestamp, value]`` pairs, one entry per month."""
        payload = json.loads(response.body.decode(response.encoding))
        return payload['series'][0]['data']

    def get_code_num(self, response):
        """Lines of code added over the last three months (newest monthly
        total minus the total from three months earlier)."""
        tol = self._load_series(response)[-4:]
        # Using first/last of the (up to) 4 trailing samples tolerates a
        # short history; the original tol[3] raised IndexError on fewer
        # than 4 months of data.
        added = tol[-1][1] - tol[0][1]
        csvout.list2csv(['code_last_three_month_added', added, response.url])
        yield {'code': added}

    def get_commit_num(self, response):
        """Commit statistics: total commits, commits in the last three
        months, and the average number of commits per three-month window
        over (at most) the last 36 months."""
        data = self._load_series(response)
        total_commits_num = sum(item[1] for item in data)
        # sum() over a slice tolerates histories shorter than 3 months
        # (the original to[2]+to[1]+to[0] raised IndexError there).
        latest_commits_num = sum(item[1] for item in data[-3:])
        # Average over the last three years, or over whatever is available
        # if the history is shorter.  The division keeps the original
        # Python 2 floor-division semantics for int inputs; an empty
        # series now yields 0 instead of ZeroDivisionError.
        window = data[-36:]
        if window:
            avarage_commits_num = (sum(item[1] for item in window) * 3) / len(window)
        else:
            avarage_commits_num = 0
        out = [
            ['total_commits_num', total_commits_num, response.url],
            ['latest_commits_num', latest_commits_num, response.url],
            ['average_commits_num', avarage_commits_num, response.url],
        ]
        for item in out:
            csvout.list2csv(item)
        print(toDB.insert_project_info(self.tid, 'community_commits',  total_commits_num, response.url))
        yield {'total_commits_num': total_commits_num,
               'latest_commits_num': latest_commits_num,
               'average_commits_num': avarage_commits_num}

    # Deprecated: the contributor chart is no longer requested (see parse()).
    def get_contributors_num(self, response):
        """Average number of contributors per three-month window over
        (at most) the last 36 months of history."""
        data = self._load_series(response)
        # Use at most the last three years of data; if there is less,
        # average over whatever exists.
        if len(data) > 36:
            data = data[-36:]
        res = sum(item[1] for item in data)
        res = (res * 3) / len(data)
        print(toDB.insert_project_info(self.tid, 'community_contributors',  res, response.url))
        yield {'contributors_num': res}

    def parse(self, response):
        # Project tag labels -- currently only serialized, not emitted.
        tags = response.xpath('//*[@id="project_tags"]/p/a/text()').extract()
        info_tags = json.dumps(tags)

        # Each chart <div> carries its JSON data source in a 'datasrc'
        # attribute: index 0 is the lines-of-code chart, index 1 commits,
        # index 2 the (deprecated) contributor chart.
        code_url = response.css("div[class='chart watermark440']::attr(datasrc)").extract()
        yield scrapy.Request(code_url[0], self.get_code_num)
        yield scrapy.Request(code_url[1], self.get_commit_num)
        #yield scrapy.Request(code_url[2], self.get_contributors_num)

