import scrapy
import sys
import importlib
import logging
import datetime
import time
import re
import hashlib
import json
from scrapy.selector import Selector
from urllib.parse import urlparse
from scrapy_redis.spiders import RedisSpider
from spider.items.Items import PageItem
from spider.settings import EXTENDS_SPIDER

# NOTE(review): legacy Python 2 idiom (paired with sys.setdefaultencoding); this is a
# no-op on Python 3 and can likely be removed — confirm no spider relies on it.
importlib.reload(sys)

''' Crawler for dgap.org site pages (original comment said "elcano" — copy-paste leftover from another spider). '''


class DgapSpider(EXTENDS_SPIDER):
    """Spider for article listing and detail pages on dgap.org.

    Listings are served by a Drupal AJAX endpoint; each entry of
    ``formDatas`` describes one listing view, which is paginated until an
    empty page is returned. Every article found is fetched and emitted as
    a ``PageItem``.
    """

    # Spider name
    name = 'dgap'

    # Domains the spider is allowed to crawl
    allowed_domains = ['dgap.org']

    # Base URL of the target site
    host = 'https://dgap.org'

    # Initial URL paths (relative to host)
    start_urls = [
        # AJAX endpoint serving paginated listing data
        '/en/views/ajax'
    ]

    session_url = ''

    # Crawl-date string; in incremental mode, article publish dates are
    # compared against it (set from settings in start_requests)
    todayStr = ''

    # Whether duplicate requests are allowed through scrapy's dupe filter;
    # derived from the DEDUPLICATED setting in settings.py
    dontFilter = False

    # Whether to crawl incrementally: stop once data is older than the
    # crawl date; from the DOWNLOAD_TODAY setting in settings.py
    downloadToday = False

    downloadPath = ''

    # POST payloads for the AJAX listing endpoint, one per listing view
    formDatas = [
        {
            'view_name': 'dgap_ip_publications',
            'view_display_id': 'block_publications',
            # BUGFIX: key was 'page:' (stray colon), so page 1 was never
            # requested under the key the endpoint (and parse()) expects.
            'page': '1',
        },
    ]

    pages = []

    exits = []

    # Category label per formDatas index (parsePaper looks it up by index)
    categories = (['TOPICS'] * 8) + (['REGIONS'] * 6) + ['PUBLICATIONS']

    def __init__(self, *args, **kwargs):
        """Initialise per-view crawl state.

        Kept in __init__ because Python 3 class bodies cannot reference
        other class attributes from inside comprehensions.
        """
        # BUGFIX: forward construction args to the base spider; the original
        # skipped super().__init__() entirely.
        super().__init__(*args, **kwargs)

        # pages[index]: page currently being fetched for formDatas[index]
        self.pages = [1 for _ in self.formDatas]

        # exits[index]: whether crawling of formDatas[index] should stop
        # (incremental mode stops once data is older than the crawl date)
        self.exits = [False for _ in self.formDatas]

    def start_requests(self):
        """Read runtime settings, then issue one listing request per view."""
        # Incremental crawl toggle
        self.downloadToday = self.settings.get('DOWNLOAD_TODAY')

        # File download root
        self.downloadPath = self.settings.get('FILES_STORE')

        # Crawl date string
        self.todayStr = self.settings.get('TODAY_STR')

        # When DEDUPLICATED is truthy, let scrapy's dupe filter drop repeats
        self.dontFilter = not bool(self.settings.get('DEDUPLICATED'))

        url = self.host + self.start_urls[0]
        for i, data in enumerate(self.formDatas):
            yield scrapy.FormRequest(url=url, callback=self.parse,
                                     formdata=data, dont_filter=self.dontFilter,
                                     cb_kwargs={'index': i})

    def parse(self, response, index):
        """Parse one AJAX listing page and schedule article detail requests.

        index -- position into formDatas/categories for this listing view.
        """
        # NOTE(review): removed leftover debug print()s that dumped the full
        # response body to stdout.
        result = json.loads(response.text)
        if len(result) == 2 and 'data' in result[1]:
            papers = Selector(text='<html>' + result[1]['data'] + '</html>').css('article[role="article"]')
            count = len(papers)
            for paper in papers:
                item = PageItem()
                item['page_title'] = paper.css('h2 a span::text').extract_first()
                # BUGFIX: listing hrefs are site-relative; resolve against the
                # response URL (no-op if already absolute) so scrapy.Request
                # below gets a valid absolute URL.
                item['page_url'] = response.urljoin(paper.css('h2 a::attr(href)').extract_first())
                try:
                    publishDateStr = paper.css('time::attr(datetime)').extract_first()
                    publishDate = time.strptime(publishDateStr[:10], "%Y-%m-%d")
                except (TypeError, ValueError):
                    # Missing <time> attribute (None slice -> TypeError) or a
                    # malformed date (ValueError): fall back to "now".
                    publishDate = time.localtime()
                item['page_publish_date'] = time.strftime('%Y-%m-%d', publishDate)
                item['page_publish_timestamp'] = time.mktime(publishDate)

                item['page_tags'] = None
                item['page_lang'] = 'en'

                # Incremental-crawl cutoff (currently disabled): stop this view
                # once articles are older than the crawl date.
                # if self.downloadToday and self.todayStr > item['page_publish_date']:
                #     self.exits[index] = True
                #     logging.info('<=============已开启增量下载，没有更多的新增数据，退出爬虫！=============>')
                #     break

                yield scrapy.Request(item['page_url'], callback=self.parsePaper,
                                     meta={'item': item, 'index': index},
                                     dont_filter=self.dontFilter)

            # Advance to the next listing page; stop when a page comes back empty
            self.pages[index] += 1
            if count == 0:
                logging.info('<=============没有下一页内容了！=============>')
            else:
                data = self.formDatas[index].copy()
                data['page'] = str(self.pages[index])
                url = self.host + self.start_urls[0]
                yield scrapy.FormRequest(url=url, callback=self.parse,
                                         formdata=data, dont_filter=self.dontFilter,
                                         cb_kwargs={'index': index})

    def parsePaper(self, response):
        """Parse an article detail page and emit the completed PageItem."""
        item = response.meta['item']
        index = response.meta['index']

        authors = response.css('.field--name-field-authors a::text').extract()
        item['page_author'] = '|'.join(author.strip() for author in authors)
        item['page_category'] = self.categories[index]
        item['page_from_site'] = self.name
        item['page_crawl_time'] = str(int(time.time()))

        # NOTE(review): removed dead code — a variable named `sha1` holding an
        # MD5 of the title whose digest was never used (and would crash on a
        # None title). Restore deliberately if a page-id hash is ever needed.
        item['page_text_body'] = ''.join(response.css('.content p *::text').extract())
        item['page_file_link'] = None
        item['page_file_save_path'] = None
        yield item
