import re

import scrapy

from MySpider.items.huibooksitems.HuiBooksIndexItem import HuiBooksIndexItem
from MySpider import mydb
from MySpider.spiders.huibooksspiders import spider_parameters


class HuiBooksIndexSpider(scrapy.Spider):
    """Index spider for https://www.huibooks.com/.

    The site only shows pagination inside a concrete category page, so each
    category must be crawled separately — pick one via ``category`` below.
    For every article link found on an index page, yields a
    ``HuiBooksIndexItem`` with the article's numeric ``url_id``, the page
    number, the position on that page, and the category code, then schedules
    the next index page until the page limit or a run of duplicates is hit.
    """

    name = "HuiBooksIndexSpider"
    allowed_domains = spider_parameters['allowed_domains'].split("|")
    table_fields = spider_parameters['index_table_fields']
    insert_sql = spider_parameters['index_insert_sql']
    unique_key = spider_parameters['unique_key']

    # Current page counter substituted into the paginated index URL.
    page = 1
    # Crawl pages 1..PAGE_CRAWL_INTEND_TO inclusive (see the <= check in parse).
    PAGE_CRAWL_INTEND_TO = 11
    # Stop paginating once this many already-stored items have been seen.
    DUPLICATE_STOP_THRESHOLD = 5

    # When True, yield every item without consulting the DB for duplicates.
    quick_mode = True
    categories = {'人文社科': 'sk',
                  '历史传记': 'zj',
                  '小说文学': 'wx',
                  '经济管理': 'gl',
                  '励志成功': 'cg',
                  '学习教育': 'jy',
                  '生活时尚': 'ss',
                  '套装合集': 'zp'
                  }
    category = 'ss'
    start_urls = ["https://www.huibooks.com/%s" % category]
    index_page_patten = spider_parameters['index_page_patten']
    top_level_node_xpath = '//div[@id="post-list"]/ul/li//a[@href and @rel][1]/@href'
    # Numeric article id between "huibooks.com/" and ".html". Dots are
    # escaped and \d+ requires at least one digit — the previous pattern's
    # bare "." matched any character and "\d*" accepted an empty id.
    url_id_in_url_patten = re.compile(r'(?<=huibooks\.com/)\d+(?=\.html)')

    table_name = 'HuiBooksIndex'
    use_cookie = False

    # Running count of items already present in the DB (non-quick mode only).
    unique_id_duplicate_count = 0

    def parse(self, response, **kwargs):
        """Parse one category index page.

        Yields one ``HuiBooksIndexItem`` per article link and, while the page
        limit and duplicate threshold allow, a ``scrapy.Request`` for the
        next index page of the same category.
        """
        urls = response.xpath(self.top_level_node_xpath).extract()
        for page_poz, url in enumerate(urls, start=1):
            match = self.url_id_in_url_patten.search(url)
            if match is None:
                # Not an article detail link — skip it instead of crashing
                # on a None.group() AttributeError.
                self.logger.warning('无法从链接中解析url_id，跳过: %s', url)
                continue
            item = HuiBooksIndexItem()
            item['url_id'] = match.group(0)
            item['page'] = self.page
            item['page_poz'] = page_poz
            item['category'] = self.category
            if self.quick_mode or not mydb.is_unique_id_duplicated(self.table_name, 'url_id', item['url_id']):
                yield item
            else:
                self.unique_id_duplicate_count += 1
                self.logger.info('网页%s数据库已有，不再重复爬取', url)
        self.page += 1
        if self.page <= self.PAGE_CRAWL_INTEND_TO and self.unique_id_duplicate_count < self.DUPLICATE_STOP_THRESHOLD:
            next_url = self.index_page_patten % (self.category, self.page)
            self.logger.info('开始下一页，第%s页', self.page)
            yield scrapy.Request(
                url=next_url,
                callback=self.parse
            )
        else:
            self.logger.info('到达最后一页或进入重复区域，停止爬取')