#coding:utf-8
import scrapy
import json
import csvout

class QuotesSpider(scrapy.Spider):
    """Search Baidu Xueshu (Baidu Scholar) for a tag and record the
    reported result count.

    Usage:  scrapy crawl xueshu -a tag="some query"

    The count is appended to a CSV file via ``csvout.list2csv`` and also
    yielded as a scraped item ``{'baidu_xueshu_num': <int>}``.
    """
    name = "xueshu"

    def start_requests(self):
        """Yield one search request for the ``tag`` spider argument.

        If no ``tag`` was supplied (``-a tag=...``), no request is made.
        """
        url = 'http://xueshu.baidu.com/'
        tag = getattr(self, 'tag', None)
        header = {
            # The site rejects requests without a browser-like User-Agent.
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.133 Safari/537.36',
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
            'Accept-Encoding': 'gzip, deflate, sdch, br',
            'Accept-Language': 'zh-CN,zh;q=0.8,en;q=0.6',
            'Cache-Control': 'max-age=0',
            'Connection': 'keep-alive',
            # BUG FIX: the request goes to xueshu.baidu.com, so the Host
            # header must match it (was 'www.baidu.com', which can make
            # the server reject or misroute the request).
            'Host': 'xueshu.baidu.com',
            'Upgrade-Insecure-Requests': '1',
        }

        if tag is not None:
            # Spaces in the query become '+' signs in the URL.
            # NOTE(review): other special characters are not escaped;
            # urllib's quote_plus would be safer — confirm before changing.
            url = url + 's?wd=' + tag.replace(' ', '+') + '&rsv_bp=0&tn=SE_baiduxueshu_c1gjeupa&rsv_spt=3&ie=utf-8&f=8&rsv_sug2=0&sc_f_para=sc_tasktype%3D%7BfirstSimpleSearch%7D'
            yield scrapy.Request(url, headers=header, callback=self.parse)

    @staticmethod
    def _extract_num(text):
        """Return the integer result count embedded in the toolbar text.

        The toolbar reads either u'...约 N 条...' (about N results) or
        u'... M 到 N 条...' (M to N results); the number sits between the
        marker character and u'条'.  Thousands separators are stripped.
        """
        # Pick whichever marker is present; u'约' takes precedence.
        marker = u'约' if text.find(u'约') > -1 else u'到'
        num = text.split(marker)[1].split(u'条')[0]
        return int(num.replace(',', ''))

    def parse(self, response):
        """Extract the result count, log it to CSV, and yield it as an item."""
        res = response.xpath('//*[@id="toolbar"]/span/text()').extract_first()
        if res is None:
            # BUG FIX: extract_first() returns None when the toolbar is
            # absent (e.g. the crawler was detected and served a block
            # page); the original code crashed with AttributeError here.
            self.logger.warning('result-count toolbar not found: %s', response.url)
            return
        num = self._extract_num(res)
        outli = ['baidu_xueshu_num', num, response.url]
        csvout.list2csv(outli)
        yield {
            # A response body length of 227 means the crawler was detected.
            'baidu_xueshu_num': num
        }

