# -*- coding: utf-8 -*-
import scrapy  # 导入scrapy包
import time
class QuotesSpider(scrapy.Spider):  # spiders must inherit from scrapy.Spider
    """Spider that fetches a fixed list of pages with browser-like headers
    and preset cookies, saving each raw response body to a local HTML file.
    """

    name = "quotes"  # spider name, used later as `scrapy crawl quotes`

    def stringToDict(self, cookie):
        """Convert a raw browser cookie header string into a dict.

        :param cookie: cookie string of the form ``"k1=v1; k2=v2; ..."``
                       as copied from a browser's request headers.
        :return: dict mapping cookie names to their string values.
        """
        itemDict = {}
        for item in cookie.split(';'):
            # Split on the FIRST '=' only: cookie values may themselves
            # contain '=' (e.g. "BAIDUID=...A213:FG=1"), which the old
            # item.split('=')[1] silently truncated to "...A213:FG".
            key, sep, value = item.partition('=')
            if not sep:
                continue  # skip empty/malformed fragments instead of crashing
            itemDict[key.strip()] = value  # strip the space left after each ';'
        return itemDict

    def start_requests(self):
        """Yield one request per start URL, attaching headers and cookies."""
        urls = [
            'https://www.360.cn/',
        ]
        cookies = 'BAIDUID=08E7DB40750D5CAAE3183DC20B25A213:FG=1; BIDUPSID=08E7DB40750D5CAAE3183DC20B25A213; PSTM=1524020840; BD_UPN=12314353; __cfduid=df05ffd46b45effe4d863325450817d6f1524104683; BDUSS=lFBd3FDdUtZSy1sNkpVVX5va2lIdkNGekFTRnZHM20wMXNjV3dnfmJjYVlyeUJiQVFBQUFBJCQAAAAAAAAAAAEAAAD8pq2CZnJlZWZhbmZhbjQ5MQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAJgi-VqYIvlad; BDORZ=B490B5EBF6F3CD402E515D22BCDA1598; H_PS_PSSID=1450_21117_20929; BD_CK_SAM=1; PSINO=3; H_PS_645EC=f14aOmFtRpjJfROMhiyTSridiVVdKOStaJC4Du4FEfhlaWWdgOrvGdsqvMT5OUtJcjie; BD_HOME=1'
        cookie_dict = self.stringToDict(cookies)
        # Mimic a real browser so the target site serves a normal page.
        headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.181 Safari/537.36',
                   'Accept': "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
                   'Connection': 'keep-alive',
                   'Cache-Control': 'max-age=0',
                   'Upgrade-Insecure-Requests': '1',
                   'Accept-Encoding': 'gzip, deflate, br',
                   'Accept-Language': 'zh-CN,zh;q=0.9'}
        for url in urls:
            yield scrapy.Request(url=url, callback=self.parse,
                                 headers=headers, cookies=cookie_dict)

    def parse(self, response):
        """Write the raw response body to ``quotes-<host>.html``.

        :param response: the downloaded page (scrapy Response).
        """
        # 'https://www.360.cn/'.split('/') -> ['https:', '', 'www.360.cn', '']
        # so [-2] picks out the host part for the filename.
        page = response.url.split("/")[-2]
        filename = 'quotes-%s.html' % page
        with open(filename, 'wb') as f:  # binary mode: write raw bytes, no decoding
            f.write(response.body)
        self.log('Saved file %s' % filename)


# if __name__=="__main__":


