# encoding: utf-8

import time
import uuid
import datetime
import re
import json
from hashlib import sha1, sha256

import scrapy
from scrapy import Request, FormRequest

# Support both deployment layouts: the packaged name (App_Spd) and the
# local development name (app_spider).
try:
    from App_Spd.items import AppSpdItem
    from App_Spd.utils import analysis, get_urlid, get_pubtime
except ImportError:  # was a bare except: — don't mask unrelated errors
    from app_spider.items import AppSpdItem
    from app_spider.utils import analysis, get_urlid, get_pubtime


def singature(path, session_id, request_id, timestamp, sign_salt, tenant_id):
    """Compute the value of the ``X-SIGNATURE`` request header.

    The six components are joined with ``&&`` (in the order shown) and the
    SHA-256 hex digest of the UTF-8 encoded result is returned.

    NOTE: the function name keeps the original "singature" spelling because
    it is called by name elsewhere in this module.  The former parameter
    names (``str``, ``str2`` …) shadowed the ``str`` builtin and said
    nothing about their role; they are renamed here — all in-module calls
    pass them positionally.
    """
    payload = "{}&&{}&&{}&&{}&&{}&&{}".format(
        path, session_id, request_id, timestamp, sign_salt, tenant_id)
    digest = sha256()
    digest.update(payload.encode("utf-8"))
    return digest.hexdigest()


def gen_headers(_t):
    """Build the signed request headers expected by vapp.tmuyun.com.

    ``_t`` is a millisecond timestamp; it is echoed in ``X-TIMESTAMP`` and
    folded into the ``X-SIGNATURE`` digest together with a fresh request id.
    """
    session_id = '623acc27fe3fc13e69740a2d'
    tenant_id = '23'
    request_id = str(uuid.uuid1())
    headers = {
        'X-SESSION-ID': session_id,
        'X-REQUEST-ID': request_id,
        'X-TIMESTAMP': _t,
        'X-TENANT-ID': tenant_id,
        'User-Agent': '1.1.6;ffffffff-cb81-b84d-ffff-ffff98c49000;HUAWEI TAS-AN00;Android;7.1.2;Release',
        'Cache-Control': 'no-cache',
        'Host': 'vapp.tmuyun.com',
        'Connection': 'Keep-Alive',
        'Accept-Encoding': 'gzip, deflate',
    }
    headers['X-SIGNATURE'] = singature(
        "/api/article/channel_list", session_id, request_id, _t,
        "FR*r!isE5W", tenant_id)
    return headers


class XinfuchengAppSpider(scrapy.Spider):
    """Spider for the Xinfucheng (新府城) news app on vapp.tmuyun.com.

    Flow: request one page of the channel article list with signed headers,
    then follow each article id to its web-detail page and yield a dict item
    (author, title, pubtime, url, urlid, websiteId, pubSource, content).
    """

    websiteId = 2141422577  # platform id assigned to this app
    pubSource = '新府城'  # display name of this app
    name = 'xinfucheng_app'

    list_url = 'https://vapp.tmuyun.com/api/article/channel_list?channel_id=5d3e4442b198500f695bdd54&isDiangHao=false&is_new=true&list_count={}&size=100&start={}'
    detail_url = 'https://vapp.tmuyun.com/webDetails/news?id={}&tenantId=23'

    def start_requests(self):
        """Yield the signed channel-list request(s)."""
        for i in range(1, 2):
            # Take one millisecond timestamp and use it for both the URL's
            # `start` parameter and the signed headers, so the signature and
            # the request refer to the same moment (previously two separate
            # time.time() calls were made).
            _t = int(time.time() * 1e3)
            target = self.list_url.format(i * 10, _t)
            yield scrapy.Request(url=target, headers=gen_headers(_t))

    def parse(self, response, **kwargs):
        """Parse the JSON article list and follow each article's detail page."""
        resp = response.json()
        for art in resp['data']['article_list']:
            item = {'author': art.get('author', '')}
            _t = int(time.time() * 1e3)
            yield Request(url=self.detail_url.format(art['id']), headers=gen_headers(_t),
                          callback=self.parse_detail, meta={'item': item})

    def parse_detail(self, response):
        """Extract title/pubtime/content from a detail page and yield the item."""
        item = response.meta.get('item', {})
        title = response.xpath('//div[@class="content-header"]/text()').get()
        pubtime = response.xpath('//div[@class="text-from"]/span[@class="text-time"]/text()').get()
        content_ = response.xpath('//div[@class="newshare-content"]').getall()
        content_ = analysis(''.join(content_))

        item['pubtime'] = pubtime
        item['title'] = title
        item['url'] = response.url
        item['urlid'] = get_urlid(item['url'])
        # Reference the class attributes instead of re-hardcoding the same
        # literals here — keeps the id/name in one place.
        item['websiteId'] = self.websiteId
        item['pubSource'] = self.pubSource
        item['content'] = content_

        # Normalize every field: falsy values become '', strings are stripped.
        for k, v in item.items():
            if not v:
                v = ''
            if isinstance(v, str):
                v = v.strip()
            item[k] = v
        yield item
