# -*- coding: utf-8 -*-
import datetime

from scrapy.http import FormRequest
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule

from ..items import MyItemLoader, LoginItem


class LoginSpider(CrawlSpider):
    """
    Login spider demo. Run with: scrapy crawl login

    Authenticates against the local test site first, then follows
    post-creation links extracted by the crawl rules.
    """
    name = 'login'
    allowed_domains = ['localhost']

    # After login, follow any link matching /create and hand the page
    # to parse_item; keep following links from those pages as well.
    rules = (
        Rule(LinkExtractor(allow=r'/create'), callback='parse_item', follow=True),
    )

    def start_requests(self):
        """Begin the crawl by submitting the login form."""
        login_request = FormRequest(
            "http://localhost:5000/auth/login",
            formdata={"username": "test", "password": "test"}
        )
        return [login_request]

    def parse_start_url(self, response):
        """Scrape every post listed on the page returned after login."""
        self.log("------------------------index page----------------------------")
        posts = response.xpath('//section[@class="content"]/article[@class="post"]')
        for post in posts:
            # Bind the loader to this article's selector so the xpaths
            # below are evaluated relative to the post.
            loader = MyItemLoader(item=LoginItem(), selector=post)
            loader.add_xpath('title', './header/div/h1/text()')
            # author and date are both parsed out of the same "about" text.
            loader.add_xpath('author', './header/div/div[@class="about"]/text()')
            loader.add_xpath('date', './header/div/div[@class="about"]/text()')
            loader.add_xpath('content', './p[@class="body"]/text()')
            yield loader.load_item()

    def parse_item(self, response):
        """Scrape a single post-creation page matched by the crawl rules."""
        self.log("------------------------new post----------------------------")
        # Loader bound to the whole response; xpaths are absolute here.
        loader = MyItemLoader(item=LoginItem(), response=response)
        loader.add_xpath('title', '//header/h1/text()')
        loader.add_xpath('author', '//form/label[1]/text()')
        # Fixed demo date, not scraped from the page.
        loader.add_value('date', '2019-12-12')
        loader.add_xpath('content', '//form/label[2]/text()')
        return loader.load_item()
