# encoding: utf-8

import time
import uuid
import datetime
import re
import json

import scrapy
from scrapy import Request, FormRequest

# from App_Spd.items import AppSpdItem
# from App_Spd.utils import analysis, get_urlid, get_pubtime

from app_spider.items import AppSpdItem
from app_spider.utils import analysis, get_urlid, get_pubtime


class ZhangshangwuyiAppSpider(scrapy.Spider):
    """Spider for the "掌上武义" (Zhangshang Wuyi) news app.

    Crawls channel listings from the cztvcloud fusion API, then fetches
    each article's detail payload and yields it as an ``AppSpdItem``.
    """

    websiteId = '2141422573'   # site id used for the yielded items
    pubSource = '掌上武义'      # app display name used for the yielded items
    name = 'zhangshangwuyi_app'

    # Channel listing endpoint: format with (channel_id, page).
    list_url = 'https://proxy.cztvcloud.com/api/fusion/channels/getChannelsInfo?appId=1007&channelId={}&page={}&platform=Android&appVersion=1.5.1'
    # Article detail endpoint: format with (news_id,).
    detail_url = 'https://proxy.cztvcloud.com/api/fusion/news/getNewsInfo?appId=1007&newsId={}&platform=Android&appVersion=1.5.1'

    channels = [
        '1734',  # Recommended (推荐)
        '1749',  # Wuyi headlines (武义要闻)
        '1756',  # Politics (政情)
        '1754',  # Provincial & municipal headlines (省市要闻)
    ]

    def start_requests(self):
        """Yield listing requests for pages 1-9 of every configured channel."""
        for chl in self.channels:
            for page in range(1, 10):
                yield Request(url=self.list_url.format(chl, page))

    def parse(self, response, **kwargs):
        """Parse one channel listing page and schedule a detail request per article.

        Guards against empty or malformed payloads so a single bad listing
        response does not raise KeyError/IndexError and abort the crawl.
        """
        resp = response.json()
        blocks = (resp.get('data') or {}).get('block') or []
        if not blocks:
            return
        for itm in blocks[0].get('items', []):
            yield Request(url=self.detail_url.format(itm['id']), callback=self.parse_detail)

    def parse_detail(self, response):
        """Parse an article detail payload into an ``AppSpdItem`` and yield it."""
        data = response.json()['data']
        item = AppSpdItem()
        item['title'] = data.get('title', '')
        item['author'] = data.get('authorName', '')

        # Prefer the canonical share URL; fall back to the API URL when the
        # field is missing or empty.
        item['url'] = data.get('shareUrl') or response.url
        item['urlid'] = get_urlid(item['url'])
        # NOTE(review): assumes createdAt is a Unix timestamp in *seconds* —
        # confirm this API does not report milliseconds.
        item['pubtime'] = str(datetime.datetime.fromtimestamp(data['createdAt']))
        item['content'] = analysis(data['content'])
        # Derive id/name from the class attributes instead of duplicating the
        # literals, so they cannot drift apart.
        item['websiteId'] = int(self.websiteId)  # the app's numeric id
        item['pubSource'] = self.pubSource  # the app's display name

        yield item

