import scrapy
import sys
import importlib
import logging
import time
import datetime
import hashlib
import spider.utils as utils
from spider.items.Items import PageItem

# NOTE(review): Python 2-era idiom (historically paired with
# sys.setdefaultencoding after the reload); under Python 3 this has no
# useful effect and could be removed.
importlib.reload(sys)

# Module summary (translated from Chinese): spider for pages of the
# iss.europa.eu website.
''' iss 网站相关页面爬虫 '''


class IssSpider(scrapy.Spider):
    """Crawl publication/activity/topic/region listings on iss.europa.eu.

    Walks every listing in ``paths`` page by page, follows each article
    teaser to its detail page, and yields one ``PageItem`` per article,
    optionally queuing its PDF for download via ``file_urls``.
    """

    # Spider name (used by `scrapy crawl iss`)
    name = 'iss'

    # Domains the spider is allowed to fetch from
    allowed_domains = ['iss.europa.eu']

    # Site root, prepended to relative paths and links
    host = 'https://www.iss.europa.eu'

    # Listing pages to crawl (relative; made absolute in __init__)
    paths = [
        '/publications/chaillot-papers',
        '/publications/books',
        '/publications/reports',
        '/publications/briefs',
        '/activities/conferences',
        '/activities/seminars',
        '/activities/task-forces',
        '/activities/other-events',
        '/activities/podcast',
        '/topics/eu-foreign-policy',
        '/topics/global-governance',
        '/topics/security-and-defence',
        '/topics/strategic-foresight',
        '/topics/transnational-challenges',
        '/regions/africa',
        '/regions/asia',
        '/regions/mena',
        '/regions/russia-and-eastern-neighbours',
        '/regions/the-americas',
        '/regions/western-balkans',
    ]

    # pages[i]: zero-based page currently being crawled for paths[i]
    pages = [0 for path in paths]

    # totals[i]: number of the last page for paths[i] (0 until discovered)
    totals = [0 for path in paths]

    # Absolute start URLs; populated in __init__
    start_urls = []

    # exits[i]: per-listing stop flag for incremental crawling — stop a
    # listing once its articles are older than the crawl date
    exits = [False for path in paths]

    # Crawl-date string; incremental mode compares publish dates against it
    # (overridden from the TODAY_STR setting in start_requests)
    todayStr = datetime.datetime.now().strftime("%Y-%m-%d")

    # When True, requests bypass Scrapy's duplicate filter; derived from
    # the DEDUPLICATED setting in settings.py
    dontFilter = False

    # When True, crawl incrementally (skip data older than the crawl
    # date); derived from the DOWNLOAD_TODAY setting in settings.py
    downloadToday = False

    def __init__(self, *args, **kwargs):
        # Forward spider arguments (e.g. `scrapy crawl iss -a key=value`)
        # and let the base class finish its own initialisation; the
        # original skipped both.
        super().__init__(*args, **kwargs)
        # Give each instance its own mutable crawl state: the class-level
        # lists are mutated in place during the crawl and would otherwise
        # be shared between spider instances.
        self.pages = [0] * len(self.paths)
        self.totals = [0] * len(self.paths)
        self.exits = [False] * len(self.paths)
        # Built here because in Python 3 a comprehension in the class body
        # cannot reference other class variables.
        self.paths = [self.host + path for path in self.paths]
        self.start_urls = [path + '?page=0' for path in self.paths]

    def start_requests(self):
        """Read runtime settings, then schedule page 0 of every listing."""
        # Incremental-crawl switch
        self.downloadToday = self.settings.get('DOWNLOAD_TODAY')
        # Root directory for downloaded files
        self.downloadPath = self.settings.get('FILES_STORE')
        # Crawl date (overrides the class-level default)
        self.todayStr = self.settings.get('TODAY_STR')
        # dont_filter=True disables the dupe filter when DEDUPLICATED is falsy
        self.dontFilter = not bool(self.settings.get('DEDUPLICATED'))
        for url in self.start_urls:
            yield scrapy.Request(url, callback=self.parse, dont_filter=self.dontFilter)

    def parse(self, response):
        """Parse one listing page: yield a detail request per article,
        then queue the next page of the same listing."""
        # Index of the current listing in self.paths (query string stripped)
        index = self.paths.index(response.url.split('?')[0])

        # Discover the last page number once, from the pager's "last" link
        if self.totals[index] == 0:
            pagination = response.css('ul.pagination li.last a::attr(href)').extract_first()
            if pagination is not None:
                self.totals[index] = int(pagination.split('=')[1])

        # Some listings wrap content in quicktabs; fall back to the whole
        # response when that wrapper is absent
        container = response.css('div.quicktabs-tabpage:not(.quicktabs-hide)') or response

        # One block per article teaser
        papers = container.css('div.block-content')

        for paper in papers:
            item = PageItem()
            item['page_crawl_time'] = str(int(time.time()))
            item['page_from_site'] = self.name
            item['page_title'] = paper.css('div.field-name-title span a::text').extract_first()
            publishDate = time.strptime(utils.formatTime(paper.css('span.date-display-single::attr(content)').extract_first()), '%Y-%m-%d')

            item['page_publish_date'] = time.strftime('%Y-%m-%d', publishDate)
            item['page_publish_timestamp'] = time.mktime(publishDate)

            # TODO(review): incremental crawling is scaffolded
            # (downloadToday / todayStr / exits) but currently disabled —
            # nothing ever sets exits[index] to True.

            item['page_author'] = ', '.join(paper.css('div.field-name-field-author li a::text').extract())

            # Body, tags and the PDF link live on the detail page; filled
            # in by parsePaper
            item['page_text_body'] = ''
            item['page_tags'] = ''
            item['page_category'] = 'The European Union Institute for Security Studies (EUISS) '
            item['page_lang'] = 'en'
            item['page_url'] = self.host + paper.css('div.medium-teaser-right-side a::attr(href)').extract_first()

            # Follow the article to its detail page, carrying the item along
            yield scrapy.Request(item['page_url'], callback=self.parsePaper, meta={'item': item}, dont_filter=self.dontFilter)

        # Queue the next page of this listing, unless finished or aborted
        self.pages[index] += 1
        if not self.exits[index] and self.pages[index] <= self.totals[index]:
            yield scrapy.Request(self.paths[index] + '?page=' + str(self.pages[index]), callback=self.parse, dont_filter=self.dontFilter)

    def parsePaper(self, response):
        """Parse an article detail page and yield the completed item."""
        item = response.meta['item']
        item['page_text_body'] = ''.join(response.css('div.body *::text').extract())
        item['page_tags'] = ', '.join(response.css('div.main div.publication-info ul.links li a::text').extract())
        item['page_file_link'] = response.css('div.file-type-pdf a::attr(href)').extract_first() or None
        item['page_file_save_path'] = None
        item['file_urls'] = []
        if item['page_file_link']:
            # MD5 of the title as a stable, non-security file-name
            # fingerprint (the original misleadingly named this digest
            # "sha1" and computed it even when no file existed)
            title_md5 = hashlib.md5(item['page_title'].encode(encoding='utf-8'))
            item['page_file_link'] = self.host + item['page_file_link']
            item['page_file_save_path'] = self.downloadPath + 'file-' + title_md5.hexdigest() + '-' + item[
                'page_crawl_time'] + '.pdf'
            item['file_urls'].append(item['page_file_link'])
        yield item
