#coding:utf-8
import scrapy
import json
import csvout
import time


class QuotesSpider(scrapy.Spider):
    """Query three Chinese blog search engines (51CTO, CSDN, cnblogs) for a
    tag and print the per-site result counts as one JSON object once all
    three responses have been handled.

    Run with: scrapy crawl blogs -a tag=<keyword>
    """
    name = "blogs"
    # Aggregated counts keyed by site label; -1 marks a failed extraction.
    result = {}
    # Number of responses handled so far; the summary prints when it hits 3.
    num = 0

    def start_requests(self):
        """Issue one search request per site for the ``tag`` spider argument.

        Yields nothing when no tag was supplied on the command line.
        """
        tag = getattr(self, 'tag', None)
        # NOTE(review): this header dict is built but never passed to any
        # Request below (and its Host is www.baidu.com, which would be wrong
        # for these three sites) -- confirm whether it should be sent via
        # Request(headers=...) or deleted.
        header = {
            # Requests started working after adding a User-Agent.
            'User-Agent':'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.133 Safari/537.36',
            'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
            'Accept-Encoding':'gzip, deflate, sdch, br',
            'Accept-Language':'zh-CN,zh;q=0.8,en;q=0.6',
            'Cache-Control':'max-age=0',
            'Connection':'keep-alive',
            'Host':'www.baidu.com',
            'Upgrade-Insecure-Requests':'1',
        }
        if tag is not None:
            url = 'http://so.51cto.com' + '/index.php?keywords=' + tag + '&project=blog&sort=time'
            yield scrapy.Request(url, callback=self.parse)
            url1 = 'http://so.csdn.net/so/search/s.do' + '?q=' + tag + '&t=blog'
            yield scrapy.Request(url1, meta={'na':'csdn_blog_num'}, callback=self.parse2)
            url2 = 'http://zzk.cnblogs.com/' + 's/blogpost?Keywords=' + tag
            yield scrapy.Request(url2, callback=self.parse3)

    @staticmethod
    def _digits(text):
        """Return the concatenated digits found in *text* as an int.

        Raises ValueError when *text* contains no digits, and TypeError when
        *text* is None -- both are handled by the callers' except clauses.
        """
        return int(''.join(ch for ch in text if ch.isdigit()))

    def _tally(self):
        """Count one finished callback; print the JSON summary after the third."""
        self.num += 1
        if self.num == 3:
            print(json.dumps(self.result))

    def parse(self, response):
        """Record the 51CTO result count (XPath copied from Chrome devtools)."""
        try:
            res = response.xpath('//*[@id="res-neck"]/div[2]/strong').extract_first()
            self.result['51cto博客'] = self._digits(res)
        except Exception:
            # Best effort: -1 marks an extraction failure for this site.
            self.result['51cto博客'] = -1
        finally:
            self._tally()

    def parse2(self, response):
        """Record the CSDN result count."""
        try:
            res = response.css("span[class='page-nav'] span[class='text']::text").extract_first()
            # The count sits between u'共' and u'条' in the text.
            self.result['CSDN博客'] = int(res.split(u'共')[1].split(u'条')[0])
        except Exception:
            # Best effort: -1 marks an extraction failure for this site.
            self.result['CSDN博客'] = -1
        finally:
            self._tally()

    def parse3(self, response):
        """Record the cnblogs result count (XPath copied from Chrome devtools)."""
        try:
            res = response.xpath('//*[@id="CountOfResults"]').extract_first()
            self.result['博客园'] = self._digits(res)
        except Exception:
            # Best effort: -1 marks an extraction failure for this site.
            self.result['博客园'] = -1
        finally:
            self._tally()