# encoding: utf-8

import time
import uuid
import datetime
import re
import json
from hashlib import sha1, sha256

import scrapy
from scrapy import Request, FormRequest

# Support both package layouts this spider ships under: try the App_Spd
# package first, fall back to app_spider. Only an ImportError means the
# package is absent — a bare except here would also hide real errors
# (syntax errors in utils, KeyboardInterrupt, ...).
try:
    from App_Spd.items import AppSpdItem
    from App_Spd.utils import analysis, get_urlid, get_pubtime, timestamp_now_to
except ImportError:
    from app_spider.items import AppSpdItem
    from app_spider.utils import analysis, get_urlid, get_pubtime, timestamp_now_to


def singature(path, session_id, request_id, timestamp, sign_salt, tenant_id):
    """Compute the app API request signature.

    Joins the six inputs with "&&" (in the given order) and returns the
    hex SHA-256 digest of the UTF-8 encoded string, mirroring the signing
    scheme of the app client.

    The function name keeps the original "singature" spelling so existing
    callers keep working.  Parameters were renamed from ``str``/``str2``/...
    because ``str`` shadowed the builtin; the in-file caller passes them
    positionally, so behavior is unchanged.
    """
    payload = "{}&&{}&&{}&&{}&&{}&&{}".format(
        path, session_id, request_id, timestamp, sign_salt, tenant_id)
    return sha256(payload.encode("utf-8")).hexdigest()


def gen_headers(_t):
    """Build the HTTP headers expected by the vapp.tmuyun.com app API.

    :param _t: millisecond timestamp; sent as X-TIMESTAMP and folded into
        the X-SIGNATURE digest.
    :return: dict of request headers, including a per-request uuid1 id.
    """
    # NOTE(review): salt and session id look extracted from the app client
    # — confirm they are still valid if requests start failing.
    salt = "FR*r!isE5W"
    session_id = '62450060010ef65c3e54b8b1'
    tenant_id = '31'
    request_id = str(uuid.uuid1())  # fresh id per request

    headers = {
        'X-SESSION-ID': session_id,
        'X-REQUEST-ID': request_id,
        'X-TIMESTAMP': _t,
        'X-TENANT-ID': tenant_id,
        'User-Agent': '1.1.8;ffffffff-a77b-5d17-ffff-ffff98c49000;HUAWEI TAS-AN00;Android;7.1.2;360',
        'Cache-Control': 'no-cache',
        'Host': 'vapp.tmuyun.com',
        'Connection': 'Keep-Alive',
        'Accept-Encoding': 'gzip, deflate',
    }
    headers['X-SIGNATURE'] = singature(
        "/api/article/channel_list", session_id, request_id, _t, salt, tenant_id)
    return headers


# Plain desktop-browser headers for fetching article share pages; unlike
# the API endpoints, the share URLs are ordinary web pages and need no
# signed headers.
web_headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/99.0.4844.74 Safari/537.36 Edg/99.0.1150.46',
}


class JinriyuechengAppSpider(scrapy.Spider):
    """Spider for the "今日越城" (Jinri Yuecheng) news app.

    Requests each channel's article list from the signed vapp.tmuyun.com
    API, then follows every article's ``share_url`` (a plain web page) to
    extract the rendered content.
    """

    name = 'jinriyuecheng_app'

    # NOTE(review): class attr holds the id as a string while the yielded
    # item stores the int 2141422567 — confirm which form downstream expects.
    websiteId = '2141422567'
    pubSource = '今日越城'
    list_url = 'https://vapp.tmuyun.com/api/article/channel_list?channel_id={}'
    detail_url = 'https://vapp.tmuyun.com/api/article/detail?id={}'

    # Channel ids to crawl; presumably lifted from the app's channel
    # listing — TODO confirm they stay valid over time.
    channels = [
        '5dbf7fdfb1985007455762fd',
        "5dbf811bb1985007455762fe",
        "5fd956a1ad61a44c9d1ab2b1",
        "5de74f0b1b011b48a65b7646",
        "5dbf80771b011b790a33c74b",
        "5dbf8143b198500745576301",
    ]

    def start_requests(self):
        """Issue one signed list request per channel."""
        for chl in self.channels:
            # Millisecond timestamp; the signature in the headers is bound
            # to it, so compute a fresh one per request.
            _t = int(time.time() * 1e3)
            yield Request(url=self.list_url.format(chl), headers=gen_headers(_t))

    def parse(self, response, **kwargs):
        """Parse a channel_list API response and follow each article's share URL."""
        resp = response.json()
        data = resp.get('data') or {}
        for opt in ('focus_list', 'article_list'):
            # A channel may lack one of the sections; don't crash on it.
            for obj in data.get(opt) or []:
                share_url = obj.get('share_url')
                if not share_url:
                    # Nothing to follow — Request(url=None) would raise.
                    continue
                item = AppSpdItem()
                item['websiteId'] = 2141422567  # 提供app对应的id
                item['pubSource'] = "今日越城"  # 提供app对应名称
                item['title'] = obj.get('doc_title', '')
                item['author'] = obj.get('author', '')
                # Fall back to "now" when the article carries no publish time.
                item['pubtime'] = timestamp_now_to(obj.get('published_at', time.time()))
                yield Request(url=share_url, headers=web_headers,
                              callback=self.parse_detail, meta={'item': item})

    def parse_detail(self, response):
        """Extract the article body from the share page and yield the item."""
        # scrapy raises AttributeError ("Response content isn't text") when
        # the body isn't decodable text; skip such responses (best-effort).
        try:
            response.text
        except AttributeError:
            return
        item = response.meta['item']

        fragments = response.xpath('//div[@class="newshare-content"]').getall()
        item['content'] = analysis(''.join(fragments))
        item['url'] = response.url
        item['urlid'] = get_urlid(item['url'])
        # Drop items that carry no text at all.
        if item['author'] + item['content'] + item['title']:
            yield item