# -*- coding: utf-8 -*-
import json
import re
import unicodedata
import urllib

# `urlencode` moved to urllib.parse in Python 3; support both interpreters.
try:
    from urllib.parse import urlencode
except ImportError:  # Python 2
    from urllib import urlencode

import scrapy
from scrapy_redis.spiders import RedisCrawlSpider


class LovdSpiderSpider(RedisCrawlSpider):
    """Paginated crawler for the LOVD shared-variants listing.

    Walks the ``variants/in_gene`` custom view list page by page and
    yields one dict per table row, keyed by the table's header titles.

    Spider arguments (all optional):
        page      -- first page to fetch (default 1)
        page_size -- rows per page requested from the site (default 100)
        page_max  -- last page to fetch (default effectively unbounded)
    """

    name = 'lovd_variants_spider'

    base_url = 'https://databases.lovd.nl/shared/variants/in_gene?'

    # Name of the scrapy_redis scheduler queue; a start URL would be pushed
    # onto this redis key to kick the spider off when running redis-driven.
    # redis_key = "lovd_variants_spider:start_urls"

    # start_urls = [
    #     'https://databases.lovd.nl/shared/variants/in_gene?viewlistid=CustomVL_IN_GENE&object=Custom_ViewList&object_id=Transcript,VariantOnTranscript,VariantOnGenome&id=0&order=geneid,ASC&page_size=100&page=1'
    # ]

    def __init__(self, **kwargs):
        # BUG FIX: the parent initializer was never called, which skips the
        # spider/redis setup performed by RedisCrawlSpider and its bases.
        super(LovdSpiderSpider, self).__init__(**kwargs)
        self.page = int(kwargs.get('page', '1'))                # first page to fetch
        self.page_size = int(kwargs.get('page_size', 100))      # rows per page
        self.page_max = int(kwargs.get('page_max', 99999999))   # last page to fetch

    def start_requests(self):
        """Issue the request for the first listing page."""
        params = {
            'viewlistid': 'CustomVL_IN_GENE',
            'object': 'Custom_ViewList',
            'object_id': 'Transcript,VariantOnTranscript,VariantOnGenome',
            'id': 0,
            'order': 'geneid,ASC',
            'page_size': self.page_size,
            'page': self.page
        }
        # BUG FIX: `urllib.urlencode` exists only on Python 2; use the
        # version-agnostic `urlencode` imported at module level.
        url = self.base_url + urlencode(params)
        yield scrapy.Request(url, meta={'params': params})

    def parse(self, response):
        """Yield one dict per data row; follow the next page while one exists."""
        table = response.css('table#viewlistTable_CustomVL_IN_GENE')

        if not response.meta.get('title'):  # static info — parse only on the first page
            # Total page count, scraped from e.g. "... on 12 pages".
            response.meta['total_page'] = response.css(
                '#viewlistPageSplitText_CustomVL_IN_GENE').re(r'on (\d+) pages')[0]
            # Column titles, NFKC-normalized (collapses &nbsp; and friends).
            response.meta['title'] = [
                unicodedata.normalize('NFKC', each.strip())
                for each in table.css('thead tr th ::text').extract()
                if each.strip()
            ]

        self.logger.info('>>> crawling page: {}/{}'.format(response.meta['params']['page'], response.meta['total_page']))

        for tr in table.css('tr.data'):
            row = [unicodedata.normalize('NFKC', each.strip())
                   for each in tr.css('td ::text').extract() if each.strip()]
            yield dict(zip(response.meta['title'], row))

        # The last nav cell carries class "inactive" on the final page.
        not_last_page = response.css('.pagesplit_nav tr th')[-1].attrib.get('class') != 'inactive'

        # BUG FIX: the original `<=` requested one page PAST page_max before
        # stopping; `<` makes page_max the last page actually fetched.
        if not_last_page and response.meta['params']['page'] < self.page_max:
            response.meta['params']['page'] += 1
            url = self.base_url + urlencode(response.meta['params'])
            yield response.follow(url, meta=response.meta)
