# -*- coding: utf-8 -*-
import json
import unicodedata
import urllib
import urllib.parse

import scrapy
from scrapy_redis.spiders import RedisCrawlSpider


class LovdSpiderSpider(RedisCrawlSpider):
    """Crawl shared variant tables from LOVD (https://databases.lovd.nl).

    Genes to crawl come either from the ``gene`` spider argument
    (comma-separated gene symbols) or, when absent, from the site's
    gene-switcher AJAX endpoint, which lists every available gene.
    Each variant table row is yielded as a dict keyed by the table's
    column headers, plus a ``gene`` key.
    """

    name = 'lovd_redis_spider'

    # Name of the scrapy_redis scheduler queue; push a start URL to this
    # redis key to feed the spider when running in redis-driven mode.
    # redis_key = "lovd_redis_spider:start_urls"

    # allowed_domains = ['databases.lovd.nl']
    base_url = 'https://databases.lovd.nl'

    # start_urls = []

    def __init__(self, **kwargs):
        # Let the base spider initialize itself (name handling, redis
        # plumbing, etc.). The original skipped this, which can break
        # RedisCrawlSpider setup.
        super(LovdSpiderSpider, self).__init__(**kwargs)
        # Optional comma-separated gene list, e.g. ``gene=BRCA1,TP53``.
        self.gene = kwargs.get('gene')

    def start_requests(self):
        """Emit one variants request per requested gene, or fetch the full gene list."""
        if self.gene:
            for gene in self.gene.split(','):
                url = '{}/shared/variants/{}'.format(self.base_url, gene)
                yield scrapy.Request(url, meta={'gene': gene, 'page': 1}, dont_filter=True)
        else:
            # No genes given: ask the gene switcher for every known gene.
            url = self.base_url + '/shared/ajax/get_gene_switcher.php'
            yield scrapy.Request(url, callback=self.get_all_genelist, dont_filter=True)

    def get_all_genelist(self, response):
        """Schedule a variants request for every gene returned by the gene switcher."""
        data = json.loads(response.body)['data']
        total = len(data)

        for n, each in enumerate(data, 1):
            gene = each['value']
            # Lazy %-style args avoid formatting when the level is filtered out.
            self.logger.info('>>> add a gene %s (%s/%s)', gene, n, total)
            url = '{}/shared/variants/{}'.format(self.base_url, gene)
            yield scrapy.Request(url, meta={'gene': gene, 'page': 1}, dont_filter=True)

    def parse(self, response):
        """Yield one dict per variant table row and follow pagination.

        On the first page for a gene, captures the table headers and the
        hidden ``search_transcriptid`` form value into ``response.meta`` so
        paginated follow-up requests can reuse them.
        """
        self.logger.info('\033[32m>>> crawling {gene} page {page}\033[0m'.format(**response.meta))
        tableid = 'table#viewlistTable_CustomVL_VOT_VOG_{}'.format(response.meta['gene'])
        table = response.css(tableid)

        skip = False
        if not response.meta.get('search_transcriptid'):
            try:
                # NFKC-normalize header text so non-breaking spaces and
                # other compatibility characters compare equal later.
                response.meta['title'] = [
                    unicodedata.normalize('NFKC', each.strip())
                    for each in table.css('thead tr th ::text').extract()
                    if each.strip()
                ]
                # ``.attrib['value']`` raises KeyError when the hidden input
                # is absent, i.e. this gene page has no variant table.
                response.meta['search_transcriptid'] = response.css('input[name="search_transcriptid"]').attrib['value']
            except KeyError:
                self.logger.warning('no variants found for this gene: {gene}'.format(**response.meta))
                skip = True

        if not skip:
            for tr in table.css('tr.data'):
                row = [
                    unicodedata.normalize('NFKC', each.strip())
                    for each in tr.css('td ::text').extract()
                    if each.strip()
                ]
                context = dict(zip(response.meta['title'], row))
                context['gene'] = response.meta['gene']
                yield context

            # The pagination footer's last cell carries class 'inactive'
            # on the final page; anything else means more pages remain.
            last_page = response.css('.pagesplit_nav tr th')

            if last_page and last_page[-1].attrib.get('class') != 'inactive':
                response.meta['page'] += 1
                params = {
                    'id': response.meta['gene'],
                    'viewlistid': 'CustomVL_VIEW_' + response.meta['gene'],
                    'page': response.meta['page'],
                    'page_size': '100',
                    'object': 'Custom_ViewList',
                    'search_transcriptid': response.meta['search_transcriptid'],
                    'object_id': 'VariantOnTranscript,VariantOnGenome,Screening,Individual',
                }
                url = '{}/shared/variants/{}?'.format(self.base_url, response.meta['gene'])
                # ``urllib.urlencode`` was removed in Python 3; the
                # replacement lives in ``urllib.parse``.
                url += urllib.parse.urlencode(params)
                self.logger.info('>>> URL: ' + url)

                yield response.follow(url, meta=response.meta)


