# -*- coding: utf-8 -*-

import logging

import scrapy
from scrapy.http import Request

from papa.items import WxGzhArticle

LOG = logging.getLogger(__name__)


def fix_xa0(text):
    """Normalize non-breaking spaces (U+00A0) in *text*.

    HTML ``&nbsp;`` entities surface as ``\\xa0`` after extraction; collapse
    them into plain spaces (a ``\\xa0`` directly before a space or newline is
    dropped rather than doubled) and strip surrounding whitespace.
    """
    # Order matters: the two-character forms must be handled before the
    # bare \xa0 fallback, or they would never match.
    for needle, replacement in (('\xa0 ', ' '), ('\xa0\n', '\n'), ('\xa0', ' ')):
        text = text.replace(needle, replacement)
    return text.strip()


class GzhArticlesSpiderBase(scrapy.Spider):
    """Base spider for WeChat official-account (mp.weixin.qq.com) crawls.

    Subclasses supply ``name`` and ``start_urls`` pointing at an "index"
    article whose paragraphs link to other articles; this base class follows
    each link and scrapes the linked article's metadata and plain text,
    yielding ``WxGzhArticle`` items.
    """

    allowed_domains = ['mp.weixin.qq.com']

    def parse(self, response):
        """Parse the index article: yield one Request per linked article."""
        for link in response.xpath('//*[@id="js_content"]/p/a'):
            # Guard: an <a> without an href would otherwise raise KeyError.
            href = link.attrib.get('href')
            if not href:
                continue
            article = WxGzhArticle()
            # urljoin is a no-op for absolute URLs but makes relative
            # hrefs resolve against the page being parsed.
            article['url'] = response.urljoin(href)
            # &nbsp; entities come through as \xa0 in extracted text.
            article['name'] = fix_xa0(link.root.text_content())
            yield Request(article['url'], callback=self.parse_article,
                          meta={'article': article})

    def parse_article(self, response):
        """Parse one linked article page and fill the remaining item fields.

        Raises IndexError if the expected profile/publish-time nodes are
        missing, which surfaces page-layout changes instead of hiding them.
        """
        article = response.meta['article']
        article['wechat_id'] = response.xpath(
            '//*[@id="js_profile_qrcode"]/div/p[1]/span')[0].root.text
        article['wechat_name'] = response.xpath(
            '//*[@id="js_profile_qrcode"]/div/strong')[0].root.text
        article['time'] = response.xpath('//*[@id="publish_time"]')[0].root.text
        # text_content() flattens nested markup inside each <p> into plain text.
        paragraphs = [p.root.text_content()
                      for p in response.xpath('//*[@id="js_content"]').css('p')]
        article['content'] = fix_xa0('\n'.join(paragraphs))
        yield article


class BtscSpider(GzhArticlesSpiderBase):
    # Crawls the "BeiTaiShuoChe" (备胎说车) index article of car tips;
    # all crawling logic lives in GzhArticlesSpiderBase.
    name = 'BeiTaiShuoChe'
    start_urls = [
        'https://mp.weixin.qq.com/s?__biz=MzIwMTAzNjc4OA==&mid=207162676&idx=1&sn=7b41a1fcc0fc703e75cc5b690106036c',  # BeiTaiShuoChe: complete collection of practical car knowledge
    ]


class BtscImagesSpider(GzhArticlesSpiderBase):
    # Crawls the "BeiTaiShuoChe" image-oriented index article;
    # all crawling logic lives in GzhArticlesSpiderBase.
    name = 'BeiTaiShuoCheImages'
    start_urls = [
        'https://mp.weixin.qq.com/s/dZpFJA2b-j0nsoVhzG3KkQ',  # BeiTaiShuoChe image-based practical tips
    ]
