# -*- coding: utf-8 -*-
from scrapy.http import Request, FormRequest, HtmlResponse
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from scrapy.conf import settings
from DouBanSpider.items import DoubanspiderItem
import requests
import re


class DoubanSpider(CrawlSpider):
    """Crawl book detail pages on book.douban.com behind a simulated login.

    Flow:
        start_requests -> GET login page (post_login)
                       -> POST credentials, optionally solving a captcha
                       -> after_login re-issues start_urls with the session
                          cookiejar, then CrawlSpider rules take over.

    Each book page yields a ``DoubanspiderItem`` with name, link, grade,
    grade_num, author and press fields.
    """

    name = 'douban'
    allowed_domains = ['douban.com']
    start_urls = ['https://book.douban.com/tag/?view=cloud']

    rules = (
        # Tag index pages: follow only.
        Rule(LinkExtractor(allow=r'https://book.douban.com/tag/[^?]+?$'), follow=True),
        # Paginated tag listings: follow only.
        Rule(LinkExtractor(allow=r'https://book.douban.com/tag/[^?]+?\?start=\d+?&type=\w+?'), follow=True),
        # Book detail pages: parse, do not follow further.
        Rule(LinkExtractor(allow=r'https://book.douban.com/subject/\d+?/$'), callback='parse_item', follow=False),
    )

    # Compiled once at class-creation time instead of on every get_press() call.
    # The pattern (including the Chinese label) must stay byte-identical to
    # match Douban's markup.
    _press_pattern = re.compile('<span class="pl">出版社:</span>(.*?)<br/>', re.S)

    @staticmethod
    def _first(extracted, default=''):
        """Return the first element of a SelectorList.extract() result,
        stripped, or *default* ('' keeps the field falsy when missing)."""
        return extracted[0].strip() if extracted else default

    def parse_item(self, response):
        """Parse one book detail page into a DoubanspiderItem."""
        item = DoubanspiderItem()

        # Book title
        item['name'] = self.get_name(response)
        # Page URL
        item['link'] = response.url
        # Average rating
        item['grade'] = self.get_grade(response)
        # Number of ratings
        item['grade_num'] = self.get_grade_num(response)
        # Author
        item['author'] = self.get_author(response)
        # Publisher
        item['press'] = self.get_press(response)

        yield item

    def start_requests(self):
        """Request the login page first; crawling starts only after login."""
        return [
            Request(
                url='https://accounts.douban.com/login',
                meta={'cookiejar': 1},
                callback=self.post_login,
            )
        ]

    def post_login(self, response):
        """POST the login form, solving the captcha manually if one appears."""
        headers = {
            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.13; rv:50.0) Gecko/20100101 Firefox/50.0',
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
            'Accept-Language': 'zh-CN,zh;q=0.8,en-US;q=0.5,en;q=0.3',
            'Referer': 'https://accounts.douban.com/login?source=book',
            'Connection': 'close',
            'Content-Type': 'application/x-www-form-urlencoded'
        }
        # Credentials come from the project settings; self.settings is the
        # supported access path (the module-level `scrapy.conf` import is
        # deprecated and removed in modern Scrapy).
        formdata = {
            'source': 'book',
            # BUGFIX: FormRequest url-encodes formdata values itself. The
            # previous code supplied pre-encoded values for 'redir' and
            # 'login', so they were double-encoded on the wire. Plain values
            # are correct here.
            'redir': 'https://book.douban.com/tag/?view=type&icn=index-sorttags-all',
            'form_email': self.settings.get('DOUBAN_NAME'),
            'form_password': self.settings.get('DOUBAN_PASSWORD'),
            'login': '登录',
        }

        # A hidden captcha-id input means Douban is challenging this login.
        captcha_id = response.xpath('//input[@name="captcha-id"]/@value').extract()
        if captcha_id:
            formdata['captcha-id'] = captcha_id[0]
            captcha_image_url = response.xpath('//img[@id="captcha_image"]/@src').extract()
            # NOTE: blocking call (requests + stdin input) — acceptable here
            # because it runs once, interactively, at login time. Timeout
            # added so a stalled download cannot hang the crawl forever.
            with open('captcha_image.jpg', 'wb') as file:
                file.write(requests.get(captcha_image_url[0], timeout=10).content)
            captcha_solution = input('请输入验证码：')
            formdata['captcha-solution'] = captcha_solution

        return [
            FormRequest.from_response(
                response,
                url='https://accounts.douban.com/login',
                meta={'cookiejar': response.meta['cookiejar']},
                headers=headers,
                formdata=formdata,
                callback=self.after_login,
            )
        ]

    def after_login(self, response):
        """Re-issue the start URLs carrying the logged-in session cookiejar."""
        for url in self.start_urls:
            yield Request(url, meta={'cookiejar': response.meta['cookiejar']})

    def _requests_to_follow(self, response):
        """Override of CrawlSpider._requests_to_follow that propagates the
        session cookiejar into every follow-up request."""
        if not isinstance(response, HtmlResponse):
            return
        seen = set()
        for n, rule in enumerate(self._rules):
            links = [l for l in rule.link_extractor.extract_links(response) if l not in seen]
            if links and rule.process_links:
                links = rule.process_links(links)
            for link in links:
                seen.add(link)
                r = Request(url=link.url, callback=self._response_downloaded)
                # Propagate the cookiejar (this is the point of the override).
                # .get() avoids a KeyError on responses without a cookiejar
                # in meta (e.g. requests injected outside the login flow).
                r.meta.update(rule=n, link_text=link.text,
                              cookiejar=response.meta.get('cookiejar'))
                yield rule.process_request(r)

    def get_name(self, response):
        """Extract the book title; '' when absent."""
        return self._first(response.xpath(".//*[@id='wrapper']/h1/span/text()").extract())

    def get_grade(self, response):
        """Extract the average rating; '' when absent."""
        return self._first(response.xpath(".//*[@id='interest_sectl']/div/div[2]/strong/text()").extract())

    def get_grade_num(self, response):
        """Extract the number of ratings; '' when absent."""
        return self._first(
            response.xpath(".//*[@id='interest_sectl']/div/div[2]/div/div[2]/span/a/span/text()").extract())

    def get_author(self, response):
        """Extract the author name; '' when absent.

        Tries two layouts Douban uses for the info box, then strips
        whitespace that Douban inserts inside the name.
        """
        author = response.xpath(".//*[@id='info']/a[1]/text()").extract()
        if not author:
            # Fallback layout: author link nested in a <span>.
            author = response.xpath(".//*[@id='info']/span[1]/a/text()").extract()

        name = self._first(author)
        return name.replace(' ', '').replace('\n', '')

    def get_press(self, response):
        """Extract the publisher via regex over the raw body; '' when absent.

        The publisher value is plain text between known markup, so a
        pre-compiled regex on the decoded body is used instead of XPath.
        """
        matches = self._press_pattern.findall(response.body.decode('utf-8', 'ignore'))
        return matches[0].strip() if matches else ''






















