# -*- coding: utf-8 -*-
import scrapy


class TaoSpider(scrapy.Spider):
    """Spider that fetches the blog admin page using a pre-captured login session.

    The session is established by replaying cookies copied verbatim from a
    logged-in browser, so no login form is submitted by the spider itself.
    """

    name = 'tao'
    allowed_domains = ['dbsgw.cn']
    start_urls = ['https://blog.dbsgw.cn/admin/']

    # Raw "Cookie" request-header value captured from a logged-in browser.
    # NOTE(review): PHPSESSID / auth tokens expire — refresh this string when
    # the admin page starts returning the login form instead of the dashboard.
    COOKIE_STRING = 'UM_distinctid=17284a499d56b8-05bea734f5f8b4-31710157-12b178-17284a499d6609; Hm_lvt_30f7cf9424f966a7bb75b6095eee923c=1591856734,1592558770,1592558824,1594032486; Hm_lvt_fc5cb0ed53567ad9178d5cd0d38fc309=1594692234,1594692794,1594802898,1595132700; Hm_lpvt_fc5cb0ed53567ad9178d5cd0d38fc309=1595132700; CNZZDATA1278526282=1529325712-1591363279-%7C1595131674; PHPSESSID=ildrggd9e07eed5rougvc430k1; EM_AUTHCOOKIE_95yUDyuGoTu7WYejbLXmH8m6O630lMsd=admin%7C%7Ca7e91e464d86db572e6e588ef9dd5815'

    def start_requests(self):
        """Yield the initial admin-page request with the captured cookies attached."""
        # Split each "name=value" pair on the FIRST '=' only: cookie values
        # (base64 / URL-encoded tokens) may themselves contain '=' characters,
        # and the previous split('=')[0] / split('=')[-1] approach silently
        # dropped the middle segments of such values.
        cookies = {}
        for pair in self.COOKIE_STRING.split('; '):
            key, _, value = pair.partition('=')
            cookies[key] = value
        yield scrapy.Request(url=self.start_urls[0], cookies=cookies, callback=self.parse)

    def parse(self, response):
        """Dump the raw response body so the operator can confirm the session is valid."""
        print(response.body)
