import re

import scrapy

from MySpider.items.IndexItem import IndexItem
from MySpider import mydb
from MySpider.spiders.wqlgamespiders import spider_parameters


class WqlGameIndexSpider(scrapy.Spider):
    """
    Index spider: crawls every node on the listing pages.

    The main concern is avoiding duplicate crawls, while accounting for
    Scrapy's out-of-order scheduling of requests.
    """
    # Page counter embedded in the URLs being crawled.

    name = "WqlGameIndexSpider"
    allowed_domains = spider_parameters['allowed_domains'].split("|")
    table_fields = spider_parameters['index_table_fields']
    insert_sql = spider_parameters['index_insert_sql']
    unique_key = spider_parameters['unique_key']

    quick_mode = True  # when True, yield every item without a DB duplicate check
    page = 1  # current listing-page number (1-based)
    PAGE_CRAWL_INTEND_TO = 51  # inclusive upper bound on listing pages to crawl
    # Stop paginating once this many already-stored url_ids have been seen
    # (named constant replacing the previous magic number 5).
    DUPLICATE_STOP_THRESHOLD = 5
    start_urls = ["https://www.688qf.com/category/djyx"]
    index_page_patten = spider_parameters['index_page_patten']
    top_level_node_xpath = '//div[@class="col-lg-6 col-12"]/article//div[@class="placeholder"]/a/@href'
    url_id_in_url_patten = re.compile(r'(?<=688qf.com/)\d*(?=/)')

    table_name = 'WqlGameIndex'

    unique_id_duplicate_count = 0  # running count of duplicate url_ids across pages
    use_cookie = False

    def parse(self, response, **kwargs):
        """Extract one IndexItem per node link, then request the next page.

        Yields:
            IndexItem for each extracted link (deduplicated against the DB
            unless ``quick_mode``), then a ``scrapy.Request`` for the next
            listing page until either ``PAGE_CRAWL_INTEND_TO`` is exceeded or
            ``DUPLICATE_STOP_THRESHOLD`` duplicates have accumulated.
        """
        urls = response.xpath(self.top_level_node_xpath).extract()
        for page_poz, url in enumerate(urls, start=1):
            # Guard against links that do not match the id pattern: the old
            # code called .group(0) on the raw search() result and raised
            # AttributeError on a non-match, aborting the whole callback.
            match = self.url_id_in_url_patten.search(url)
            if match is None:
                print('链接%s中未找到url_id，跳过\n' % url)
                continue
            item = IndexItem()
            item['url_id'] = match.group(0)
            item['page'] = self.page
            item['page_poz'] = page_poz
            if self.quick_mode:
                yield item
            elif not mydb.is_unique_id_duplicated(self.table_name, 'url_id', item['url_id']):
                yield item
                print('开始爬取网页%s\n' % url)
            else:
                self.unique_id_duplicate_count += 1
                print('网页%s数据库已有，不再重复爬取\n' % url)
        self.page += 1
        if self.page <= self.PAGE_CRAWL_INTEND_TO and self.unique_id_duplicate_count < self.DUPLICATE_STOP_THRESHOLD:
            url = self.index_page_patten % self.page
            print('开始下一页，第%s页' % self.page)
            yield scrapy.Request(
                url=url,
                callback=self.parse,
            )
        else:
            print('到达最后一页或进入重复区域，停止爬取')