import scrapy
from scrapy.http import Request, FormRequest


# Note: items, pipelines and settings modules are optional for a spider like
# this one — the essential pieces are creating the project and the spider file.
class CrawlDoubanSpider(scrapy.Spider):
    """Log in to Douban through its login form, then scrape the page title.

    Flow: ``start_requests`` fetches the login page with a cookiejar enabled,
    ``parse`` submits the login form, and ``parse_after_login`` runs on the
    post-login response.
    """

    name = 'crawl_douban'
    allowed_domains = ['douban.com']

    # Single source of truth for the UA header (it was duplicated verbatim
    # in start_requests and parse).
    USER_AGENT = ("Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:92.0) "
                  "Gecko/20100101 Firefox/92.0")
    # The old endpoint https://accounts.douban.com/j/mobile/login/basic is
    # stale; this is its replacement.
    LOGIN_URL = "https://accounts.douban.com/passport/login"

    def start_requests(self):
        """Fetch the login page.

        ``meta={"cookiejar": 1}`` enables Scrapy's per-request cookie jar so
        the session cookie set on login is carried into later requests.
        """
        headers = {"User-Agent": self.USER_AGENT}
        return [Request(self.LOGIN_URL, callback=self.parse, headers=headers,
                        meta={"cookiejar": 1})]

    def parse(self, response, **kwargs):
        """Fill in and submit the login form found on the fetched page.

        ``FormRequest.from_response`` requires that the response body actually
        contains a <form>; a JS-rendered login page would make it raise.
        """
        print("若没有验证码")
        # SECURITY NOTE(review): credentials are hard-coded in source control;
        # move them to settings or environment variables.
        data = {
            "name": "18768788978",
            "remember": "true",
            "password": "FC18768788978",
        }
        print("登录中")
        return [
            FormRequest.from_response(
                response,
                # Propagate the same cookiejar so the login session persists.
                meta={"cookiejar": response.meta["cookiejar"]},
                headers={"User-Agent": self.USER_AGENT},
                formdata=data,
                callback=self.parse_after_login,
            )
        ]

    def parse_after_login(self, response):
        """Runs on the post-login response; prints the title as a smoke check."""
        print("登录完成和已爬取")
        title = response.xpath("/html/head/title").extract()
        print(title)

    # Backward-compatible alias: this callback used to be named ``next``
    # (which shadowed the builtin of the same name).
    next = parse_after_login
