# encoding: utf-8

import time
import uuid
import datetime
import re
import json
from hashlib import sha1, sha256

import scrapy
from scrapy import Request, FormRequest

try:
    # Preferred (legacy) package layout.
    from App_Spd.items import AppSpdItem
    from App_Spd.utils import analysis, get_urlid, get_pubtime
except ImportError:
    # Narrowed from a bare `except:` — a bare except also swallowed
    # KeyboardInterrupt and real errors raised *inside* App_Spd modules,
    # silently masking bugs. Only a missing package should trigger the
    # fallback to the renamed package.
    from app_spider.items import AppSpdItem
    from app_spider.utils import analysis, get_urlid, get_pubtime


def singature(path, session_id, request_id, timestamp, sign_salt, tenant_id):
    """Compute the API request signature.

    Joins the six components with "&&" and returns the hex SHA-256 digest
    of the UTF-8 encoded result. Non-string components (e.g. an int
    timestamp) are stringified, matching the original ``str.format``
    behaviour.

    NOTE: the misspelled name ("singature") and the positional parameter
    order are kept so existing callers are unaffected; the first parameter
    previously shadowed the builtin ``str``, which is why the locals were
    renamed.
    """
    payload = "&&".join(
        str(part)
        for part in (path, session_id, request_id, timestamp, sign_salt, tenant_id)
    )
    return sha256(payload.encode("utf-8")).hexdigest()


def gen_headers(_t):
    """Build the signed request headers for the channel-list app API.

    ``_t`` is a millisecond timestamp (int); it is folded into the
    X-SIGNATURE digest and echoed back in the X-TIMESTAMP header.
    A fresh UUID is generated per call and used both as X-REQUEST-ID
    and as a signature component.
    """
    signature_salt = "FR*r!isE5W"
    _uid = str(uuid.uuid1())
    headers = {}
    headers['X-SESSION-ID'] = '6242736db77d2e7967615c10'
    headers['X-REQUEST-ID'] = _uid
    # Header values must be str/bytes — the raw int broke header
    # normalization. str(_t) sends the same digits the signature was
    # computed over, so the server-side check is unaffected.
    headers['X-TIMESTAMP'] = str(_t)
    headers['X-TENANT-ID'] = '30'
    headers['User-Agent'] = '1.1.8;ffffffff-a77b-5d17-ffff-ffff98c49000;HUAWEI TAS-AN00;Android;7.1.2;360'
    headers['Cache-Control'] = 'no-cache'
    # NOTE(review): this Host differs from the list_url host
    # (vapp.tmuyun.com) used by the spider — confirm which one the API
    # actually expects.
    headers['Host'] = 'app.pjnews.cn'
    headers['Connection'] = 'Keep-Alive'
    headers['Accept-Encoding'] = 'gzip, deflate'
    # Signature covers: path, session id, request id, timestamp, salt, tenant id.
    headers['X-SIGNATURE'] = singature("/api/article/channel_list", "6242736db77d2e7967615c10", _uid, _t,
                                       signature_salt, "30")
    return headers


# Desktop-browser headers used when fetching the article *detail* pages on
# the public web site (the signed app headers above are only for the API).
web_headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/99.0.4844.74 Safari/537.36 Edg/99.0.1150.46',
}


class RongpananAppSpider(scrapy.Spider):
    """Spider for the "Rong Pan'an" (融磐安) news app.

    Requests each configured channel's article list from the signed app
    API, then follows every article URL and scrapes the detail page into
    an ``AppSpdItem``.
    """

    websiteId = '2141422575'   # site id assigned to this app
    pubSource = '融磐安'        # display name of this app
    name = 'rongpanan_app'

    # Channel-list API endpoint; the placeholder is filled with a channel id.
    list_url = 'https://vapp.tmuyun.com/api/article/channel_list?channel_id={}&isDiangHao=false&is_new=true&list_count=0&size=10'

    # Channel ids to crawl.
    channels = [
        '5d78670b1b011b406fe592b4',
        '5d8881ae1b011b0c53740a49',
        '611dc492ad61a462bfb02cdf',
        '5d9c448f1b011b4c5ec2c2c8',
        '5d9c449db19850083c1f884c',
        '5d786945b198505dc6f2533e',
    ]

    def start_requests(self):
        """Yield one signed list request per channel."""
        for channel_id in self.channels:
            _t = int(time.time() * 1e3)  # millisecond timestamp for the signature
            yield Request(url=self.list_url.format(channel_id), headers=gen_headers(_t))

    def parse(self, response, **kwargs):
        """Collect articles from both the focus and the regular list,
        then schedule a detail-page request for each article with a URL."""
        payload = response.json()
        articles = []
        for section in ('focus_list', 'article_list'):
            articles.extend(payload['data'].get(section, []))

        for entry in articles:
            item = AppSpdItem()
            item['title'] = entry.get('doc_title', '')
            item['author'] = entry.get('author', '')
            item['url'] = entry.get('url', '')
            if item['url']:
                yield Request(url=item['url'], callback=self.parse_detail,
                              meta={'item': item}, headers=web_headers)

    def parse_detail(self, response):
        """Fill in body content, publish time and site metadata from the
        article detail page, then emit the completed item."""
        item = response.meta.get('item')
        content_html = ''.join(response.xpath('//div[@class="newshare-content"]').getall())
        item['content'] = analysis(content_html)
        item['pubtime'] = response.xpath('//div[@class="text-from"]/span[@class="text-time"]/text()').get()
        # Reuse the class-level identifiers instead of repeating the
        # literals here; the item expects the website id as an int.
        item['websiteId'] = int(self.websiteId)
        item['pubSource'] = self.pubSource
        item['urlid'] = get_urlid(item['url'])
        yield item
