# -*- coding=utf-8 -*-
import hashlib
import json
import math
import time

import scrapy
from redis import Redis
from scrapy.exceptions import CloseSpider
from scrapy.spiders import Spider

from ..items import ZhiWangItem, _DBConf
from .commonFn import *


class CourseSpider(Spider):
    """Crawl the Toutiao PC feed API and scrape article detail pages.

    Flow: start_requests -> parse (seed cookies, compute the ``as``/``cp``
    signature) -> parse_list (derive page count, schedule one request per
    page) -> parse_list_link (follow each detail link) -> parse_item
    (build a ZhiWangItem).

    NOTE(review): the detail-page XPaths (``c-company*``, ``c-card*``) and
    the ZhiWangItem fields look like they were carried over from another
    site's spider — verify against the pages actually crawled.
    """

    name = "Toutiao"
    allowed_domains = ["toutiao.com"]
    start_urls = [
        'https://www.toutiao.com/'
    ]
    i = 0  # counter; not used anywhere in this file
    key_word = '远程教育'  # search keyword; not used anywhere in this file
    list_url = 'https://www.toutiao.com/api/pc/feed/'
    # Browser-like headers shared by every request this spider issues.
    headers = {
        'accept': "text/javascript, text/html, application/xml, text/xml, */*",
        'accept-encoding': "gzip, deflate, br",
        'accept-language': "zh-CN,zh;q=0.9",
        'cache-control': "no-cache",
        'content-type': "multipart/form-data; boundary=----WebKitFormBoundary7MA4YWxkTrZu0gW",
        'dnt': "1",
        'pragma': "no-cache",
        'referer': "https://www.toutiao.com/",
        'user-agent': "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36",
        'x-requested-with': "XMLHttpRequest"
    }

    # Query parameters for the feed API; 'as'/'cp' (signature) and
    # 'pageindex' are filled in at crawl time.
    myFormData = {
        "min_behot_time": '0',
        "category": '__all__',
        "utm_source": 'toutiao',
        "widen": '1',
        "tadrequire": 'true',
        # BUG FIX: parse_list reads myFormData['pagesize'] but the key was
        # never defined, which raised KeyError on every crawl. 20 items
        # per page is assumed — TODO confirm against the live API.
        "pagesize": '20',
    }

    @staticmethod
    def getASCP():
        """Compute Toutiao's 'as'/'cp' anti-scraping signature pair.

        Both values are derived from the current unix timestamp and the
        uppercase MD5 of its decimal string, interleaved character by
        character (a port of the site's JavaScript).

        :return: tuple ``(AS, CP)`` of two 15-char uppercase hex strings.
        """
        t = int(math.floor(time.time()))
        e = hex(t).upper()[2:]  # timestamp as uppercase hex, no '0x'
        m = hashlib.md5()
        m.update(str(t).encode(encoding='utf-8'))
        i = m.hexdigest().upper()

        # Timestamps outside the 8-hex-digit range fall back to fixed
        # values, mirroring the site's own JS.
        if len(e) != 8:
            AS = '479BB4B7254C150'
            CP = '7E0AC8874BB0985'
            return AS, CP
        n = i[0:5]   # first 5 hex digits of the MD5
        a = i[-5:]   # last 5 hex digits of the MD5
        s = ''
        r = ''
        for o in range(5):
            s += n[o] + e[o]
            r += e[o + 3] + a[o]

        AS = 'A1' + s + e[-3:]
        CP = e[0:3] + r + 'E1'
        return AS, CP

    def __init__(self, storeConf=json.dumps(_DBConf), limit_count=0, trash_data=False, *a, **kw):
        """Read the store configuration and initialise shared spider state.

        :param storeConf: JSON-encoded database configuration.
        :param limit_count: stop after this many items (0 = unlimited).
        :param trash_data: whether to keep previously scraped data.
        """
        super().__init__(*a, **kw)
        initSpider(self, trash_data=trash_data, limit_count=limit_count, storeConf=storeConf)
        self.limit_count = limit_count
        self.r = Redis(db=1)  # Redis db 1 holds this spider's bookkeeping

    def start_requests(self):
        """Issue a priming request against the home page so the session
        cookie jar (cookiejar 1) is populated before the API is hit."""
        yield scrapy.Request(url=self.start_urls[0],
                             headers=self.headers,
                             meta={'cookiejar': 1},
                             callback=self.parse)

    def parse(self, response):
        """Sign the feed query and request the first list page."""
        # FIX: write back to the existing lowercase 'referer' key instead
        # of adding a second, duplicate 'Referer' entry to the dict.
        self.headers['referer'] = response.request.url
        make_as, make_cp = self.getASCP()
        self.myFormData['as'] = make_as
        self.myFormData['cp'] = make_cp
        yield scrapy.FormRequest(url=self.list_url,
                                 headers=self.headers,
                                 method='GET',
                                 meta={'cookiejar': 1},
                                 formdata=self.myFormData,
                                 callback=self.parse_list,
                                 dont_filter=True)

    def parse_list(self, response):
        """Derive the number of result pages and schedule one POST per page."""
        total_text = response.xpath('//*[@id="totalcount"]/text()').extract_first()
        if total_text is None:
            # FIX: int(None) used to raise TypeError; stop cleanly instead.
            raise CloseSpider('totalcount element missing from list page')
        paper_size = int(total_text)
        paper_num = paper_size // int(self.myFormData['pagesize'])
        for page in range(1, paper_num):
            # BUG FIX: the original line ended with a stray comma, which
            # stored the tuple (str(page),) instead of the string.
            self.myFormData["pageindex"] = str(page)
            yield scrapy.FormRequest(url=self.list_url,
                                     headers=self.headers,
                                     method='POST',
                                     # fresh cookiejar per page keeps sessions separate
                                     meta={'cookiejar': page + 1, 'page': page},
                                     formdata=self.myFormData,
                                     callback=self.parse_list_link,
                                     dont_filter=True)

    def parse_list_link(self, response):
        """Follow every detail link found on a list page."""
        self.headers['referer'] = response.request.url
        items = response.xpath('//a[@class="c-company-top-link"]')
        # Record which list pages have been fetched so a crawl can resume.
        with open('../record_page_new.txt', 'a') as f:
            f.write(str(response.meta['page']) + '\n')
        for item in items:
            # NOTE(review): extract_first() returns None when the span is
            # absent, which would raise on .replace() — confirm the markup
            # always carries it.
            public_year = item.xpath(
                'div[@class="c-company__body-name"]/span/text()'
            ).extract_first().replace(' ', '').replace('\r\n', '')
            yield scrapy.Request(url=item.xpath('@href').extract_first(),
                                 meta={'cookiejar': response.meta['cookiejar'],
                                       'public_year': public_year},
                                 headers=self.headers,
                                 callback=self.parse_item)

    def parse_item(self, response):
        """Scrape one detail page into a ZhiWangItem and yield it."""
        base_info = response.xpath('/html/body/div[4]/div[1]')
        keywords = base_info.xpath('//div[contains(text(),"关键词")]/following-sibling::*/a/text()').extract()
        item = ZhiWangItem()
        item['html'] = base_info.xpath('//div[@class="c-card__aritcle"]/text()').extract_first().replace(" ", "")
        item['url'] = response.url
        item['title'] = base_info.xpath('//div[@class="c-card__title2"]/text()').extract_first().replace(" ", "")
        item['author'] = base_info.xpath('//div[@class="c-card__author"]/a[3]/text()').extract_first()
        item['tags'] = keywords
        download_info = response.xpath('/html/body/div[4]/div[2]/div[1]')
        item['link_num'] = download_info.xpath("a[1]/text()").extract_first()
        item['download_num'] = download_info.xpath("a[2]/text()").extract_first()
        # The meta string is '\xa0'-separated: college, source type, year.
        public_info = response.meta['public_year'].split('\xa0')
        if len(public_info) == 3:
            item['source_type'] = public_info[1]
            item['college'] = public_info[0]
            item['public_year'] = public_info[2]
        else:
            if response.meta['public_year'].find('期') != -1:
                # A journal ('期刊') entry: decompose it ourselves.
                public_info = response.meta['public_year'].split(" ")[0].split('\xa0')
                item['source_type'] = '期刊'
                item['college'] = public_info[0]
                item['public_year'] = public_info[1]
            else:
                self.log("还有其他不知名类型存在。。")
        yield item
