import scrapy
import sys
import importlib
import logging
import datetime
import time
import re
import hashlib
from urllib.parse import urlparse
from scrapy_redis.spiders import RedisSpider
from spider.items.Items import PageItem
from spider.settings import EXTENDS_SPIDER

# NOTE(review): Python-2 legacy idiom (historically paired with
# sys.setdefaultencoding after reload); it is a no-op on Python 3 and
# could safely be removed.
importlib.reload(sys)

''' elcano-blog 网站相关页面爬虫 '''


class BasicSpider(EXTENDS_SPIDER):
    """Spider for article pages of blog.realinstitutoelcano.org.

    Walks the paginated English listing (``/en/page/N/``), yields one detail
    request per article, and fills a :class:`PageItem` with title, URL,
    publish date, authors and body text on the detail page.
    """

    # Spider name
    name = 'elcano-blog'

    # Domains the spider is allowed to visit
    allowed_domains = ['blog.realinstitutoelcano.org']

    # Site host
    host = 'https://blog.realinstitutoelcano.org'

    # Listing-page path template; {} is the 1-based page number
    path = '/en/page/{}/'

    # Absolute start URLs (populated in __init__)
    start_urls = []

    # Crawl-date string; incremental crawls compare article publish dates
    # against it (set from settings TODAY_STR)
    todayStr = ''

    # Passed as dont_filter= to requests; inverse of the DEDUPLICATED setting
    dontFilter = False

    # Whether to crawl incrementally (stop once data is older than the crawl
    # date); driven by the DOWNLOAD_TODAY setting
    downloadToday = False

    # File download root, from the FILES_STORE setting
    downloadPath = ''

    count = 0

    def __init__(self):
        # Built here rather than in the class body because Python 3 class-body
        # scoping prevents referencing other class attributes from
        # comprehensions/expressions.
        self.path = self.host + self.path

        self.start_urls = [self.path]

        # Listing page currently being fetched (1-based)
        self.page = 1

        # Total number of listing pages, parsed from the first page's pagination
        self.total = 0

        # Set to True to stop pagination (used by the incremental-crawl cutoff)
        self.exit = False

    # Build the initial request from runtime settings
    def start_requests(self):
        """Read runtime settings and issue the request for listing page 1."""
        # Incremental crawl flag
        self.downloadToday = self.settings.get('DOWNLOAD_TODAY')

        # File download path
        self.downloadPath = self.settings.get('FILES_STORE')

        # Crawl date string
        self.todayStr = self.settings.get('TODAY_STR')

        # dont_filter=True disables Scrapy's dedup filter, hence the inversion
        self.dontFilter = not bool(self.settings.get('DEDUPLICATED'))

        yield scrapy.Request(self.path.format('1'), callback=self.parse, dont_filter=self.dontFilter,
                             cb_kwargs={'isFirstPage': True})

    # Parse one article-listing page
    def parse(self, response, isFirstPage):
        """Yield a detail request per article on this listing page, then one
        request for the next listing page (if any)."""
        pagination = response.css('.pagination ul > a::attr(href)')
        if isFirstPage and pagination:
            # The first number embedded in the pagination link is the total
            # page count
            self.total = int(re.findall(r'\d+', pagination.extract_first())[0])

        # Content container; tolerates minor structural differences per page
        container = response.css('div#content_box')

        category = 'blog'
        papers = container.css('div.post')

        for paper in papers:
            item = PageItem()
            item['page_title'] = paper.css('header h2 a::text').extract_first().strip()
            item['page_url'] = paper.css('header h2 a::attr(href)').extract_first().strip()

            publishDate = time.strptime(
                paper.css('.post-info .post-info-inner span.thetime::text').extract_first().strip(), '%d/%m/%Y')
            item['page_publish_date'] = time.strftime('%Y-%m-%d', publishDate)
            item['page_publish_timestamp'] = time.mktime(publishDate)
            item['page_tags'] = None
            item['page_lang'] = 'en'

            # Incremental-crawl cutoff (currently disabled)
            # if self.downloadToday and self.todayStr > item['page_publish_date']:
            #     self.exit = True
            #     logging.info('<=============Incremental download enabled; no more new data, stopping spider!=============>')
            #     break

            # Author names are separated by the literal word 'and'; replace it
            # with '|' so the joined string is machine-splittable
            authors = paper.css('.post-info .post-info-inner span.theauthor::text').extract()
            item['page_author'] = ''.join(['|' if a.strip() == 'and' else a for a in authors])
            item['page_category'] = category
            item['page_from_site'] = self.name
            item['page_crawl_time'] = str(int(time.time()))

            # Follow the article link to scrape the detail page
            yield scrapy.Request(item['page_url'], callback=self.parsePaper, meta={'item': item},
                                 dont_filter=self.dontFilter)

        # BUGFIX: this pagination step used to sit INSIDE the article loop, so
        # self.page advanced once per article (skipping listing pages) and a
        # next-page request was yielded for every article. Advance exactly once
        # per listing page, after all of its articles have been dispatched.
        self.page += 1
        if not self.exit and self.page <= self.total:
            yield scrapy.Request(self.path.format(str(self.page)), callback=self.parse,
                                 dont_filter=self.dontFilter, cb_kwargs={'isFirstPage': False})

    # Parse an article detail page
    def parsePaper(self, response):
        """Complete the PageItem started in parse() with the article body and
        file-download placeholders, then yield it."""
        item = response.meta['item']
        # MD5 of the title; only consumed by the commented-out file-download
        # path below. NOTE(review): previously misnamed `sha1` — it is MD5.
        title_hash = hashlib.md5()
        title_hash.update(item['page_title'].encode(encoding='utf-8'))

        # Body text: paragraphs plus h2 headings, excluding the WPML language
        # switcher paragraph
        p = response.css(
            '.post-single-content p:not(p.wpml-ls-statics-post_translations)::text, .post-single-content h2::text')
        item['page_text_body'] = ''.join(p.extract())
        item['page_file_link'] = None
        item['page_file_save_path'] = None
        item['file_urls'] = []
        # if item['page_file_link']:
        #     item['page_file_link'] = self.host + item['page_file_link']
        #     item['page_file_save_path'] = self.downloadPath + 'file-' + title_hash.hexdigest() + '-' + item['page_crawl_time'] + '.pdf'
        #     item['file_urls'].append(item['page_file_link'])

        yield item
