import scrapy
import sys
import importlib
import logging
import datetime
import time
import re
import hashlib
from urllib.parse import urlparse
from scrapy_redis.spiders import RedisSpider
from spider.items.Items import PageItem
from spider.settings import EXTENDS_SPIDER

# NOTE(review): Python 2 relic (used to precede `sys.setdefaultencoding`);
# this is a no-op on Python 3 and can likely be removed — confirm nothing
# in the project relies on the reload side effect.
importlib.reload(sys)

''' Spider for the elcano-notas pages of www.realinstitutoelcano.org '''


class BasicSpider(EXTENDS_SPIDER):
    """Spider for the elcano-notas press-notes section.

    Walks the paginated article listing under ``paths``, extracts the
    article metadata from each listing page, then follows every article
    link and yields a fully populated ``PageItem`` from the detail page.
    """

    # Spider name
    name = 'elcano-notas'

    # Domains the spider may visit
    allowed_domains = ['www.realinstitutoelcano.org']

    # Host of the crawled site
    host = 'http://www.realinstitutoelcano.org'

    # Listing paths to crawl (prefixed with ``host`` in __init__)
    paths = [
        '/wps/portal/rielcano_es/prensa/notas',
    ]

    # Absolute start URLs (filled in __init__ from ``paths``)
    start_urls = []

    # Crawl-date string; incremental crawls compare article publish dates
    # against it
    todayStr = ''

    # Whether to bypass Scrapy's duplicate filter, so the same article seen
    # on different pages is recorded more than once; driven by the
    # DEDUPLICATED setting in settings.py
    dontFilter = False

    # Whether to crawl incrementally: stop once data is older than the
    # crawl date; driven by the DOWNLOAD_TODAY setting in settings.py
    downloadToday = False

    # Base directory for downloaded files (FILES_STORE setting)
    downloadPath = ''

    count = 0

    def __init__(self, *args, **kwargs):
        # Forward all arguments so Scrapy's Spider initialisation (name,
        # start_urls handling, kwargs binding) still runs. The original
        # code skipped super().__init__(), which breaks the construction
        # contract used by Spider.from_crawler.
        super().__init__(*args, **kwargs)

        # Built here rather than in the class body because Python 3
        # class-body scoping prevents a comprehension in the class body
        # from seeing other class variables.
        self.paths = [self.host + path for path in self.paths]

        self.start_urls = self.paths.copy()

        # For paths[index], pages[index] is the page currently being
        # crawled; page numbering starts at 1.
        self.pages = [1 for _ in self.paths]

        # For paths[index], totals[index] is the maximum page number,
        # discovered from the pagination widget of the first page.
        self.totals = [0 for _ in self.paths]

        # For paths[index], whether crawling should stop; for incremental
        # crawls this is set once data older than the crawl date is seen.
        self.exits = [False for _ in self.paths]

    def start_requests(self):
        """Read runtime settings, then issue one request per start URL."""
        # Incremental crawl flag
        self.downloadToday = self.settings.get('DOWNLOAD_TODAY')

        # File download base path
        self.downloadPath = self.settings.get('FILES_STORE')

        # Crawl date string
        self.todayStr = self.settings.get('TODAY_STR')

        # Duplicate filtering: dont_filter is the inverse of DEDUPLICATED
        self.dontFilter = not bool(self.settings.get('DEDUPLICATED'))
        for url in self.start_urls:
            yield scrapy.Request(url, callback=self.parse, dont_filter=self.dontFilter, cb_kwargs={'isFirstPage': True})

    def parse(self, response, isFirstPage):
        """Parse an article listing page.

        On the first page, derive the pagination URL pattern and the total
        page count, then re-enter via the pattern. On subsequent pages,
        yield one request per listed article and schedule the next page.
        """
        urlObject = urlparse(response.url)
        host = urlObject.scheme + '://' + urlObject.hostname

        pagination = response.css('.pagination ul li span a::attr(href)')
        if isFirstPage and pagination:
            index = self.start_urls.index(response.url)
            # Turn the first pagination link into a format template by
            # replacing the trailing page number with '{}'.
            self.paths[index] = host + re.sub(r'=\d+$', '={}', pagination.extract_first())
            # The last pagination link carries the highest page number.
            self.totals[index] = int(re.findall(r'=(\d+)$', pagination.extract()[-1])[0])
            logging.info(self.paths[index])
            logging.info('=========>Page Total: ' + str(self.totals[index]))
            logging.info('=========>Page Patten: ' + str(self.paths[index]))
            yield scrapy.Request(self.paths[index].format(self.pages[index]), callback=self.parse,
                                 dont_filter=self.dontFilter, cb_kwargs={'isFirstPage': False})
        else:
            # Recover which listing this page belongs to by normalising the
            # URL back into its template form.
            url = re.sub(r'=\d+$', '={}', response.url)
            index = self.paths.index(url)
            logging.info('=========>Current Page: ' + str(self.pages[index]))

            # Container selector; adapt here for minor structural
            # differences between pages.
            container = response.css('.main-content')

            # Article list: titles come from h1 tags, publish dates are
            # scraped as dd/mm/yyyy strings from the container's text.
            category = container.css('div.section::text').extract_first()
            h1Tags = container.css('h1')
            allText = ''.join(container.css('::text').extract())
            dateStrings = re.findall(r'(\d{1,2}/\d{1,2}/\d{4})', allText)

            # Pair titles with dates positionally; cap at the shorter list
            # so a missing date cannot raise IndexError mid-page.
            itemCount = min(len(h1Tags), len(dateStrings))

            logging.info('=========>Page Content Count (h1): ' + str(len(h1Tags)))
            for i in range(itemCount):
                item = PageItem()
                item['page_title'] = h1Tags[i].css('a::text').extract_first().strip()
                item['page_url'] = host + h1Tags[i].css('a::attr(href)').extract_first()

                publishDate = time.strptime(dateStrings[i], '%d/%m/%Y')
                item['page_publish_date'] = time.strftime('%Y-%m-%d', publishDate)
                item['page_publish_timestamp'] = time.mktime(publishDate)
                item['page_tags'] = None
                item['page_lang'] = 'es'

                # Incremental-crawl cutoff (disabled):
                # if self.downloadToday and self.todayStr > item['page_publish_date']:
                #     self.exits[index] = True
                #     logging.info('Incremental download enabled and no newer data remains; stopping.')
                #     break

                item['page_author'] = None
                item['page_category'] = category
                item['page_from_site'] = self.name
                item['page_crawl_time'] = str(int(time.time()))

                # Follow the article link to scrape the detail page.
                yield scrapy.Request(item['page_url'], callback=self.parsePaper, meta={'item': item},
                                     dont_filter=self.dontFilter)

            # Schedule the next listing page, unless this listing was
            # flagged to stop or the last page has been reached.
            self.pages[index] += 1
            if not self.exits[index] and self.pages[index] <= self.totals[index]:
                yield scrapy.Request(self.paths[index].format(str(self.pages[index])), callback=self.parse,
                                     dont_filter=self.dontFilter, cb_kwargs={'isFirstPage': False})

    def parsePaper(self, response):
        """Parse an article detail page and yield the completed item.

        Fills in the body text and, when a PDF link is present, the file
        download fields (``file_urls`` triggers Scrapy's files pipeline).
        """
        urlObject = urlparse(response.url)
        host = urlObject.scheme + '://' + urlObject.hostname
        item = response.meta['item']

        # Digest of the title, used to build a stable file name.
        # NOTE(review): this is MD5 (the original variable was misleadingly
        # named ``sha1``); the algorithm is kept so existing file names
        # remain stable.
        titleDigest = hashlib.md5()
        titleDigest.update(item['page_title'].encode(encoding='utf-8'))

        item['page_text_body'] = ''.join(response.css('.main-content div *::text').extract())
        item['page_file_link'] = response.css('img[alt="pdf"] + a::attr(href)').extract_first() or None
        item['page_file_save_path'] = None
        item['file_urls'] = []
        if item['page_file_link']:
            item['page_file_link'] = host + item['page_file_link']
            item['page_file_save_path'] = self.downloadPath + 'file-' + titleDigest.hexdigest() + '-' + item[
                'page_crawl_time'] + '.pdf'
            item['file_urls'].append(item['page_file_link'])
        yield item
