import re

import scrapy
import pymysql
import emoji

from MySpider.items.zhishikooitems.ZhiShiKooDetailItem import ZhishikooDetailItem
from bs4 import BeautifulSoup
from scrapy.utils.project import get_project_settings
import redis


def book_info_to_item(info_name, info_value, item):
    """Copy parsed book-metadata pairs onto *item*.

    ``info_name`` and ``info_value`` are parallel lists scraped from the
    page; they are zipped into a lookup table and each known Chinese label
    is mapped onto its item field. Missing labels default to ''.
    """
    # item field -> label as it appears on the page
    field_to_label = {
        'author': '作者',
        'publisher': '出版社',
        'pubdate': '出版年',
        'ASIN': 'ASIN',
        'ISBN': 'ISBN',
        'sub_bookname': '副标题',
        'transfer': '译者',
        'ori_bookname': '原作名',
    }
    # dict.get avoids KeyError when a label is absent from the page.
    info = dict(zip(info_name, info_value))
    for field, label in field_to_label.items():
        item[field] = info.get(label, '')


def strips(target_string, strip_strings):
    """Apply ``str.strip`` to *target_string* once per entry of *strip_strings*.

    NOTE: ``str.strip`` treats each entry as a *set of characters* to remove
    from both ends, not as a literal substring — callers should pass
    character sets accordingly.
    """
    stripped = target_string
    for char_set in strip_strings:
        stripped = stripped.strip(char_set)
    return stripped


class ZhishikooDetailSpider(scrapy.Spider):
    """Spider for book detail pages on book.zhishikoo.com.

    Previously scraped rows live in MySQL; at startup the known webids and
    the listing pages holding fewer than 30 books are mirrored into Redis
    (the 'webid' set and the 'pages' list) so the spider can skip duplicates
    and revisit incomplete pages.
    """

    name = "ZhishikooDetailSpider"
    allowed_domains = ["zhishikoo.com"]
    # Listing-page counter embedded in the crawl URL; also recorded on items.
    page = 1
    page_url = 'https://book.zhishikoo.com/books/category/news/page/%s'
    # Maximum number of additional listing pages to schedule in one run.
    PAGE_CRAWL_INTEND_TO = 0
    start_urls = ["https://book.zhishikoo.com/books/category/news/page/%s" % page]

    # Lookbehind patterns keep only the interesting tail of each URL.
    img_patten = re.compile(r'(?<=zhishikoo.com/wp-content/uploads/).*')
    baidu_patten = re.compile(r'(?<=baidu.com/s/).*')
    lanzou_patten = re.compile(r'(?<=lanzou\w.com/).*')
    # Exact suffix the site appends to every book title.
    title_strip = '(epub+azw3+mobi)'

    top_level_node_xpath = '//div[@class="post grid"]/div[@class="img"]/a[1]/@href'
    webid_in_url_patten = re.compile(r'(?<=\w/)\d*(?=.html)')

    detail_title_xpath = '//h1[@class="article-title"]/text()'

    my_redis = redis.Redis(host="localhost", port=6379)
    # Listing pages crawled so far in this process.
    page_crawled = 0

    table_name = 'zhishikoo'
    unique_key = 'webid'
    table_fields = ['webid', 'title', 'page', 'page_poz', 'img', 'sub_bookname', 'ori_bookname', 'pub_time',
                    'category',
                    'author', 'transfer', 'publisher', 'pubdate', 'ISBN', 'ASIN', 'author_info', 'book_info',
                    'download_href', 'download_password']

    settings = get_project_settings()

    def __init__(self, **kwargs):
        """Open the MySQL connection and prime Redis with already-crawled state."""
        super().__init__(**kwargs)
        self.connect = pymysql.connect(host=self.settings['MYSQL_HOST'],
                                       port=self.settings['MYSQL_PORT'],
                                       user=self.settings['MYSQL_USER'],
                                       password=self.settings['MYSQL_PASSWORD'],
                                       database=self.settings['MYSQL_DBNAME'])
        self.cursor = self.connect.cursor()
        self.query_mysql_into_redis()

    def parse(self, response, **kwargs):
        """Entry point for a listing page: scrape it, then queue the next one.

        BUG FIX: parse_this_page() and parse_next_page() are generators; the
        original called them without iterating, so the Requests they produce
        were never handed to Scrapy. They must be re-yielded from here.
        """
        yield from self.parse_this_page(response)
        yield from self.parse_next_page()

    def query_mysql_into_redis(self):
        """Mirror crawl state from MySQL into Redis.

        'pages' list: page numbers holding fewer than 30 books (incomplete).
        'webid' set: ids of books already stored, used for de-duplication.
        """
        querystring = 'select page,count(*) as pagecount from zhishikoo GROUP BY page having pagecount<30'
        self.cursor.execute(querystring)
        rows = self.cursor.fetchall()
        self.my_redis.delete('pages')
        for row in rows:
            self.my_redis.lpush('pages', row[0])

        # Identifiers cannot be bound as SQL parameters; both values are
        # trusted class constants, so %-formatting is safe here.
        querystring = 'select %s from %s' % (self.unique_key, self.table_name)
        self.cursor.execute(querystring)
        rows = self.cursor.fetchall()
        self.my_redis.delete('webid')
        for row in rows:
            self.my_redis.sadd('webid', row[0])

    def parse_this_page(self, response, is_parse_detail=True):
        """Yield a detail Request for each not-yet-crawled book on a listing page."""
        urls = response.xpath(self.top_level_node_xpath).extract()

        # page_poz records the book's 1-based position within the page.
        for page_poz, url in enumerate(urls, start=1):
            webid = self.webid_in_url_patten.search(url).group(0)

            # Skip books whose webid is already recorded in Redis.
            if is_parse_detail and not self.my_redis.sismember('webid', webid):
                yield scrapy.Request(
                    url=url,
                    callback=self.parse_detail,
                    # Pass listing context along to the detail parser.
                    meta={
                        "page": self.page,
                        "page_poz": page_poz,
                        "webid": webid
                    }
                )
        self.page += 1

    def parse_next_page(self):
        """Schedule the next incomplete listing page popped from Redis, if any."""
        self.page_crawled += 1
        if self.page_crawled <= self.PAGE_CRAWL_INTEND_TO:
            next_page = self.my_redis.rpop('pages')
            # BUG FIX: rpop returns None once the 'pages' queue is empty;
            # the original crashed on .decode(). Stop cleanly instead.
            if next_page is None:
                return
            yield scrapy.Request(
                url=self.page_url % next_page.decode('utf-8'),
                callback=self.parse,
            )

    def parse_detail(self, response):
        """Extract one book's metadata and download info into a ZhishikooDetailItem."""
        item = ZhishikooDetailItem()
        item['webid'] = response.meta["webid"]
        # Fields with a fixed extraction rule:
        item['page'] = response.meta["page"]
        item['page_poz'] = response.meta["page_poz"]
        title = response.xpath('//h1[@class="article-title"]/text()').extract_first()
        # BUG FIX: the original used str.strip(title_strip), which treats its
        # argument as a *character set* and also eats matching letters off the
        # ends of the title itself. Remove the exact suffix instead.
        if title.endswith(self.title_strip):
            title = title[:-len(self.title_strip)]
        item['title'] = title
        item['pub_time'] = response.xpath('//i[@class="dripicons dripicons-clock"]/parent::span/text()') \
            .extract_first()
        item['category'] = response.xpath('//i[@class="dripicons dripicons-folder"]/following-sibling::a/text()') \
            .extract_first()

        content_div = response.xpath('//div[@class="article-content"]')
        # The leading '.' anchors the XPath at content_div, not the document root.
        img = content_div.xpath('.//figure[1]//img/@src').extract_first()
        # BUG FIX: guard against a missing cover image or a non-matching URL;
        # the original crashed on .search(None) / .group(0) of None.
        img_match = self.img_patten.search(img) if img else None
        item['img'] = img_match.group(0) if img_match else ''

        author_info = content_div.xpath('.//h2[contains(text(),"作者简介")]/following-sibling::p').extract_first()
        # Some pages carry no author introduction.
        if author_info is not None:
            # BeautifulSoup strips the HTML tags; then drop emoji and all whitespace.
            author_info = BeautifulSoup(author_info, 'html.parser').get_text()
            item['author_info'] = "".join(emoji.replace_emoji(author_info).split())
        else:
            item['author_info'] = ''

        book_info = content_div.xpath(
            './/h2/span[contains(text(),"内容简介")]/parent::h2/following-sibling::p').extract_first()
        # The synopsis may likewise be absent.
        if book_info is not None:
            book_info = BeautifulSoup(book_info, 'html.parser').get_text()
            item['book_info'] = "".join(emoji.replace_emoji(book_info).split())
        else:
            item['book_info'] = ''

        download_href = content_div.xpath('.//a[@href]/@href').extract_first()
        # BUG FIX: pages without any link made re.search(None) raise TypeError;
        # record an empty download instead.
        if download_href is None:
            item['download_href'] = ''
            item['download_password'] = ''
        elif self.baidu_patten.search(download_href) is not None:
            item['download_href'] = self.baidu_patten.search(download_href).group(0)

            # zhishikoo detail pages come in several templates; pick the
            # extraction strategy by whether the extraction-code marker
            # ('提取码') appears among the div text nodes.
            if '提取码' in ''.join(content_div.xpath('.//div/text()').extract()):
                self.detail_template1(content_div, item)
            else:
                self.detail_template2(content_div, item)
        elif self.lanzou_patten.search(download_href) is not None:
            item['download_href'] = 'lanzou_' + self.lanzou_patten.search(download_href).group(0)
            item['download_password'] = ''

        yield item

    def detail_template1(self, content, item):
        """Template 1: labels in figure[2] spans, values in <br>-separated text nodes."""
        info_name = [x.strip('：') for x in content.xpath('.//figure[2]//span/text()').extract()]
        # <br>-separated values are sibling text nodes of the parent element;
        # join them, trim the outer newlines, then split into one value per line.
        info_value = ''.join(content.xpath('.//figure[2]/text()').extract()).strip('\n').split('\n')
        book_info_to_item(info_name, info_value, item)

        # The extraction code sits inside a <br> segment, so collect every div
        # text node and pick the one mentioning '提取码'.
        download_password = [x for x in content.xpath('.//div/text()').extract() if "提取码" in x][0].strip(' 提取码：')
        item['download_password'] = download_password

    def detail_template2(self, content, item):
        """Template 2: 'label: value' pairs inside figure[2] spans."""
        info = content.xpath('.//figure[2]//span/text()').extract()
        info_name = [re.split(r': |：', x)[0] for x in info]
        # TODO handle entries like '作者：' where the value part is missing
        info_value = [re.split(r': |：', x)[1] if len(re.split(r': |：', x)) > 1 else '' for x in info]

        book_info_to_item(info_name, info_value, item)

        download_password = [x for x in content.xpath('.//p[last()]/text()').extract() if "提取码" in x][0].strip(
            ' 提取码：')
        item['download_password'] = download_password

