# -*- coding: utf-8 -*-
import scrapy

from zhihu_longin.items import CtospiderItem


class A51ctoSpider(scrapy.Spider):
    """Spider that logs in to 51cto via home.51cto.com and scrapes the
    blog article list (title + URL), following pagination with the
    authenticated session cookies."""

    name = '51cto'
    allowed_domains = ['51cto.com']
    # Browser-like headers so the site does not reject the requests.
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:58.0) Gecko/20100101 Firefox/58.0',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
        'Accept-Language': 'zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2',
        'Accept-Encoding': 'gzip, deflate, br',
        'Referer': 'http://blog.51cto.com',
        'Content-Type': 'application/x-www-form-urlencoded',
    }

    def start_requests(self):
        """Fetch the login page; the 'cookiejar' meta key tells Scrapy to
        track session cookies for this crawl."""
        urls = ['http://home.51cto.com/index']
        for url in urls:
            # Fix: self.headers was defined but never attached to a request.
            yield scrapy.Request(url=url, headers=self.headers,
                                 callback=self.cto_login,
                                 meta={'cookiejar': 1})

    def cto_login(self, response):
        """Simulate the login by posting the form with the CSRF token
        scraped from the login page, then jump to the blog list."""
        csrf = response.css("input[name='_csrf']::attr(value)").extract_first()
        # NOTE(review): credentials are hard-coded; they belong in
        # settings or environment variables, not in source control.
        yield scrapy.FormRequest.from_response(
            response,
            url='http://blog.51cto.com/linuxliu?type=1',
            meta={'cookiejar': response.meta['cookiejar']},
            formdata={
                'LoginForm[rememberMe]': '0',
                'LoginForm[username]': 'kuangshp',
                'LoginForm[password]': '****',
                '_csrf': csrf,
            },
            callback=self.after_login,
            dont_click=True
        )

    def after_login(self, response):
        """Extract one item per article row, then follow the next page."""
        for row in response.css("ul.artical-list li"):
            item = CtospiderItem()
            item['title_url'] = row.css("a.tit::attr(href)").extract_first()
            title = row.css("a.tit::text").extract_first()
            # Fix: extract_first() returns None when the node is missing;
            # calling .strip() on it raised AttributeError.
            item['title'] = title.strip() if title else title
            yield item
        # Pagination ('next' renamed to avoid shadowing the builtin).
        next_page = response.css('.pagination li.next a::attr(href)').extract_first()
        if next_page:
            # Fix: resolve possibly-relative hrefs with urljoin, and
            # propagate the cookiejar so the logged-in session survives
            # across pagination requests.
            yield scrapy.Request(url=response.urljoin(next_page),
                                 callback=self.after_login,
                                 meta={'cookiejar': response.meta.get('cookiejar', 1)})
