import scrapy
import sys
import importlib
import logging
import datetime
import time
import re
import hashlib
import json
from scrapy.selector import Selector
from urllib.parse import urlparse
from scrapy_redis.spiders import RedisSpider
from spider.items.Items import PageItem
from spider.settings import EXTENDS_SPIDER

importlib.reload(sys)  # NOTE(review): Python-2-era relic (reload(sys) before sys.setdefaultencoding); serves no purpose on Python 3 — candidate for removal

''' Crawler for pages on the CEPS site (www.ceps.eu). '''


class BasicSpider(EXTENDS_SPIDER):
    """Crawler for pages on the CEPS site (www.ceps.eu).

    Listing data is fetched via the site's WordPress admin-ajax endpoint,
    one POST per page per post type (``formDatas``); each article card in
    the returned HTML fragment is then crawled for its details.
    """

    # Spider name
    name = 'ceps'

    # Domains the spider may visit
    allowed_domains = ['www.ceps.eu']

    # Host used to build absolute request URLs
    host = 'https://www.ceps.eu'

    # Initial request paths, relative to `host`
    start_urls = [
        # async data endpoint behind the /ceps-news/ listing pages
        '/wp-admin/admin-ajax.php'
    ]

    # Crawl-date string; incremental crawls compare article publish dates
    # against it
    todayStr = ''

    # Whether to bypass the duplicate filter, so identical articles found on
    # different pages are each recorded; driven by settings DEDUPLICATED
    dontFilter = False

    # Whether to crawl incrementally; driven by settings DOWNLOAD_TODAY.
    # When enabled, crawling stops once data is older than the crawl date.
    downloadToday = False

    # File download directory; driven by settings FILES_STORE
    downloadPath = ''

    count = 0

    # One POST payload template per listing (WordPress post type) to crawl.
    # The 'articles' lists are filled with already-seen article ids as pages
    # are fetched, because the endpoint expects them echoed back.
    formDatas = [
        {
            'action': 'ut_archive_ajax',
            'post-type': 'news',
            'articles': []
        },
        {
            'action': 'ut_archive_ajax',
            'post-type': 'post',
            'articles': []
        },
        {
            'action': 'ut_archive_ajax',
            'post-type': 'publications',
            'articles': []
        },
        {
            'action': 'ut_archive_ajax',
            'post-type': 'task-forces',
            'articles': []
        },
        {
            'action': 'ut_archive_ajax',
            'post-type': 'projects',
            'articles': []
        }
    ]

    pages = []

    totals = []

    exits = []

    # Human-readable category label for formDatas[index]
    categories = [
        'CEPS News',
        'CEPS In Brief',
        'CEPS Publications',
        'CEPS Task Forces',
        'CEPS Projects',
        # 'Global Memos',
        # 'Member Articles',
        # 'Global Governance Working Papers',
        # 'Report Cards',
        # 'Current publications',
        # 'EconPol Opinions',
        # 'EconPol Policy Briefs',
        # 'EconPol Policy Reports',
        # 'EconPol Working Paper Series',
        # 'News Archive',
        # '',
        # 'MOST RECENT GLOBAL LIBRARY',
        # 'Latest Research',
        # 'Tools and Platforms',
        # 'Guidance',
        # 'Case Studies',
        # 'National Documents'
    ]

    def __init__(self, *args, **kwargs):
        # BUGFIX: run the parent (scrapy / scrapy-redis) initialization and
        # forward any spider arguments (e.g. from `-a`); the original
        # skipped super().__init__() entirely.
        super().__init__(*args, **kwargs)

        # Per-instance state is initialized here because Python 3 class
        # bodies cannot reference other class variables from inside list
        # comprehensions.

        # pages[i]: listing page currently being fetched for formDatas[i],
        # starting from 1
        self.pages = [1 for _ in self.formDatas]

        # totals[i]: server-reported maximum page count for formDatas[i]
        self.totals = [0 for _ in self.formDatas]

        # exits[i]: stop flag for formDatas[i]; set when the incremental
        # cut-off is reached
        self.exits = [False for _ in self.formDatas]

        # articles[i]: ids of articles already crawled for formDatas[i]
        self.articles = [[] for _ in self.formDatas]

    def start_requests(self):
        """Read crawl options from settings and POST one first-page request
        per configured post type."""
        # Incremental crawl?
        self.downloadToday = self.settings.get('DOWNLOAD_TODAY')

        # File download directory
        self.downloadPath = self.settings.get('FILES_STORE')

        # Crawl date
        self.todayStr = self.settings.get('TODAY_STR')

        # Deduplicate?
        self.dontFilter = not bool(self.settings.get('DEDUPLICATED'))

        url = self.host + self.start_urls[0]
        for i, data in enumerate(self.formDatas):
            yield scrapy.FormRequest(url=url, callback=self.parse,
                                     formdata=data, dont_filter=self.dontFilter,
                                     cb_kwargs={'isFirstPage': True, 'index': i})

    def parse(self, response, isFirstPage, index):
        """Parse one page of the listing endpoint for ``formDatas[index]``.

        Yields one detail-page request per article card, then re-POSTs the
        endpoint for the next listing page until ``totals[index]`` pages
        have been fetched (or ``exits[index]`` is set).
        """
        logging.debug('parse: listing index=%s', index)

        result = json.loads(response.text)
        if isFirstPage:
            # Only the first response is used to record the total page count
            self.totals[index] = int(result['max'])
            logging.info('=========>Page Total: %s', self.totals[index])

        # The endpoint returns a bare HTML fragment; wrap it so it parses
        papers = Selector(text='<html>' + result['articles'] + '</html>').css('.ut-post-card')
        for paper in papers:
            item = PageItem()

            item['page_title'] = paper.css('.ut-caption-title-small a::text').extract_first()
            item['page_url'] = paper.css('.ut-caption-title-small a::attr(href)').extract_first()

            # Some cards use a smaller title / bare-link markup variant
            if item['page_title'] is None:
                item['page_title'] = paper.css('.ut-caption-title-xsmall a::text').extract_first()

            if item['page_url'] is None:
                item['page_url'] = paper.css('a.ut-link-reset::attr(href)').extract_first()

            try:
                publishDateStr = paper.css('.ut-date-label::text').extract_first()
                # NOTE: '%d %b %Y' month names are locale-dependent; assumes
                # an English locale
                publishDate = time.strptime(publishDateStr, "%d %b %Y")
            except (TypeError, ValueError):
                # Missing (None) or unparseable date label: fall back to now.
                # BUGFIX: was a bare `except:` swallowing every exception.
                publishDate = time.localtime()
            item['page_publish_date'] = time.strftime('%Y-%m-%d', publishDate)
            item['page_publish_timestamp'] = time.mktime(publishDate)

            item['page_tags'] = None
            item['page_lang'] = 'en'

            # Record the article id for the next request; the id attribute
            # has a 13-character prefix before the numeric id.
            # BUGFIX: guard against cards without an id attribute (the
            # original crashed on `None[13:]`).
            articleId = paper.css('::attr(id)').extract_first()
            if articleId:
                self.formDatas[index]['articles'].append(articleId[13:])

            # Incremental-crawl cut-off (currently disabled):
            # if self.downloadToday and self.todayStr > item['page_publish_date']:
            #     self.exits[index] = True
            #     break

            # BUGFIX: skip cards where no link variant matched (the original
            # passed None to scrapy.Request, which raises).
            if item['page_url']:
                yield scrapy.Request(item['page_url'], callback=self.parsePaper,
                                     meta={'item': item, 'index': index},
                                     dont_filter=self.dontFilter)

        # Move on to the next listing page, if any
        self.pages[index] += 1
        if not self.exits[index] and self.pages[index] <= self.totals[index]:
            # Shallow copy: the template's 'articles' list keeps accumulating
            # ids while this request carries the serialized form of them
            data = self.formDatas[index].copy()
            articles = ['articles[' + articleId + ']=' for articleId in data['articles']]
            data['articles'] = '&'.join(articles)
            url = self.host + self.start_urls[0]
            yield scrapy.FormRequest(url=url, callback=self.parse,
                                     formdata=data, dont_filter=self.dontFilter,
                                     cb_kwargs={'isFirstPage': False, 'index': index})

    def parsePaper(self, response):
        """Parse an article detail page and yield the completed PageItem."""
        # Item pre-filled by parse(); index selects the category label
        item = response.meta['item']
        index = response.meta['index']

        logging.debug('parsePaper: listing index=%s', index)

        authors = response.css('.ut-caption-title-xsmall::text').extract()
        item['page_author'] = '|'.join(author.strip() for author in authors)
        item['page_category'] = self.categories[index]
        item['page_from_site'] = self.name
        item['page_crawl_time'] = str(int(time.time()))

        # BUGFIX: removed a stray md5-of-title computation (misleadingly
        # named `sha1`) whose digest was never used anywhere.
        item['page_text_body'] = ''.join(response.css('.ut-content p::text').extract())
        item['page_file_link'] = None
        item['page_file_save_path'] = None
        yield item
