import scrapy
import sys
import importlib
import logging
import time
import datetime
import math
import hashlib
import re
import spider.utils as utils
from spider.items.Items import PageItem

# NOTE(review): Python 2-era idiom (reload(sys) was needed before
# sys.setdefaultencoding); it has no effect on Python 3 — candidate for removal.
importlib.reload(sys)

''' Spider for listing and article pages of the www.kas.de site '''


class KasSpider(scrapy.Spider):
    """Spider for article/publication listing pages of www.kas.de.

    Crawls a fixed set of listing paths, follows Liferay-style pagination,
    and visits each article's detail page to collect body text and an
    optional PDF link.
    """

    # Spider name
    name = 'kas'

    # Domains the spider is allowed to visit
    allowed_domains = ['www.kas.de']

    # Site host, used to build absolute URLs
    host = 'https://www.kas.de'

    # Listing pages to crawl (relative paths; made absolute in __init__)
    paths = [
        '/en/publications',
        '/en/facts-findings',
        '/en/web/auslandsinformationen/articles',
        '/en/web/auslandsinformationen/issues',
        '/en/web/auslandsinformationen/country-reports',
        '/en/web/auslandsinformationen/facts-findings',
        '/en/web/die-politische-meinung/articles',
        '/en/web/die-politische-meinung/issues',
        '/en/web/die-politische-meinung/blog',
        '/en/interview',
        '/en/kurzum',
        '/en/country-reports',
        '/en/monitor',
        '/en/event-reports',
        '/en/handreichungen-zur-politischen-bildung',
        '/en/events',
    ]

    # Category label for each entry of `paths` (parallel list, same order)
    categories = [
        'publications',
        'publications series',
        'international reports',
        'international reports',
        'international reports',
        'international reports',
        'The Political Opinion',
        'The Political Opinion',
        'The Political Opinion',
        'publications series',
        'publications series',
        'publications series',
        'publications series',
        'publications series',
        'publications series',
        'Events',
    ]

    # Pagination query-parameter name discovered per listing page (Liferay
    # portlet parameter ending in 'cur'); filled lazily in parse().
    pageIds = [None for path in paths]

    # For paths[index], the page number currently being crawled (1-based)
    pages = [1 for path in paths]

    # For paths[index], the maximum page count (0 until discovered)
    totals = [0 for path in paths]

    # Absolute start URLs (filled in __init__)
    start_urls = []

    # For paths[index], whether to stop crawling early; used by the
    # (currently disabled) incremental mode when data is older than the
    # crawl date.
    exits = [False for path in paths]

    # Crawl-date string; incremental mode compares article publish dates
    # against it.  Overwritten from settings in start_requests().
    todayStr = datetime.datetime.now().strftime("%Y-%m-%d")

    # Whether to bypass Scrapy's duplicate filter, i.e. keep one record per
    # page even if the same article appears on several listing pages.
    # Derived from the DEDUPLICATED setting in start_requests().
    dontFilter = False

    # Whether to crawl incrementally (stop at data older than the crawl
    # date).  Set from the DOWNLOAD_TODAY setting in start_requests().
    downloadToday = False

    def __init__(self):
        # Build the absolute URLs here rather than in a class-level list
        # comprehension: Python 3 changed class-body scoping so that a
        # comprehension in the class body cannot reference other class
        # attributes.
        self.paths = [self.host + path for path in self.paths]
        self.start_urls = self.paths.copy()

    def start_requests(self):
        """Read runtime settings, then issue one request per listing page."""
        # Incremental crawl?
        self.downloadToday = self.settings.get('DOWNLOAD_TODAY')

        # File download directory
        self.downloadPath = self.settings.get('FILES_STORE')

        # Crawl date
        self.todayStr = self.settings.get('TODAY_STR')

        # Deduplicate repeated URLs?
        self.dontFilter = not bool(self.settings.get('DEDUPLICATED'))

        for url in self.start_urls:
            yield scrapy.Request(url, callback=self.parse, dont_filter=self.dontFilter)

    def parse(self, response):
        """Parse one article-listing page.

        Yields a Request per article (handled by parsePaper) and, when more
        pages exist, a Request for the next listing page.
        """
        # Index of the current listing page within self.paths (query string
        # stripped, since pagination requests carry parameters).
        index = self.paths.index(response.url.split('?')[0])

        container = response.css('#column-2 .portlet')
        pagination = container.css('.lfr-pagination .last a::attr(href)').extract_first()

        if pagination is None:
            # Some listing pages use a different column layout.
            container = response.css('#column-3 .portlet')
            if len(container) > 0:
                container = container[0]

        # Discover the maximum page count (and the pagination parameter
        # name) once per listing page.
        if self.totals[index] == 0:
            pagination = container.css('.lfr-pagination .last a::attr(href)').extract_first()
            if pagination is not None:
                self.totals[index] = int(pagination.split('cur=')[1])
                if self.pageIds[index] is None:
                    match = re.findall(r'&([A-Za-z0-9_]+cur)=', pagination)
                    if match:
                        self.pageIds[index] = match[0]

        # Article tiles on the listing page
        papers = container.css('a.o-tile')

        # Collect the listing-level fields for every article.
        for paper in papers:
            item = PageItem()
            item['page_crawl_time'] = str(int(time.time()))
            item['page_from_site'] = self.name
            item['page_title'] = paper.css('.o-page-headline::text').extract_first()

            # The site uses two date formats: "31. January 2020" (with a
            # dot) and "January 31, 2020".  Fall back to the current time
            # when the date is missing or unparsable.
            # (Was `if dateStr is not None`, which is always true for the
            # result of str.join, and a bare `except:`.)
            dateStr = ''.join(paper.css('.o-metadata--date::text').extract())
            publishDate = time.localtime()
            if dateStr:
                fmt = '%d. %B %Y' if '.' in dateStr else '%B %d, %Y'
                try:
                    publishDate = time.strptime(dateStr.strip(), fmt)
                except ValueError:
                    pass
            item['page_publish_date'] = time.strftime('%Y-%m-%d', publishDate)
            item['page_publish_timestamp'] = time.mktime(publishDate)
            item['page_author'] = paper.css('.o-metadata--author::text').extract_first()

            # Incremental-crawl early exit (disabled; kept for reference):
            # if self.downloadToday and self.todayStr > item['time']:
            #     self.exits[index] = True
            #     logging.info('<============= Incremental download enabled, no newer data, exiting spider! =============>')
            #     break

            # Body text, tags and the PDF link are filled in on the detail
            # page (parsePaper).
            item['page_text_body'] = ''
            item['page_tags'] = ''
            item['page_category'] = self.categories[index]
            item['page_lang'] = 'en'
            item['page_url'] = self.host + paper.css('::attr(href)').extract_first()

            # Visit the article detail page.
            yield scrapy.Request(item['page_url'], callback=self.parsePaper, meta={'item': item}, dont_filter=self.dontFilter)

        # Follow the next listing page, if any.
        self.pages[index] += 1
        if not self.exits[index] and self.pages[index] <= self.totals[index]:
            pageId = self.pageIds[index]
            # Derive the Liferay portlet id from the pagination parameter
            # name: drop leading underscores and the literal '_cur' suffix.
            # (The previous `.rstrip('_cur')` stripped any of the characters
            # '_', 'c', 'u', 'r' and could over-strip some portlet ids.)
            portletId = pageId.lstrip('_')
            if portletId.endswith('_cur'):
                portletId = portletId[:-len('_cur')]
            yield scrapy.Request(self.paths[index] + '?' + pageId + '=' + str(self.pages[index]) + '&p_p_id=' + portletId,
                                 callback=self.parse, dont_filter=self.dontFilter)

    def parsePaper(self, response):
        """Parse an article detail page and complete the item from parse()."""
        item = response.meta['item']
        item['page_text_body'] = ''.join(response.css('.c-page-main__text *::text').extract())
        item['page_tags'] = None
        item['page_file_link'] = response.css('a.o-cta::attr(href)').extract_first()
        item['page_file_save_path'] = None
        item['file_urls'] = []

        # The saved file name embeds an MD5 digest of the title.  (The
        # variable was previously misnamed `sha1`; the title may also be
        # None, which would crash `.encode`.)
        digest = hashlib.md5()
        digest.update((item['page_title'] or '').encode(encoding='utf-8'))

        if item['page_file_link']:
            item['page_file_link'] = self.host + item['page_file_link']
            item['page_file_save_path'] = (self.downloadPath + 'file-' + digest.hexdigest()
                                           + '-' + item['page_crawl_time'] + '.pdf')
            item['file_urls'].append(item['page_file_link'])
        yield item
