# encoding: utf-8

import time
import uuid
import datetime
import re
import json

import scrapy
from scrapy import Request, FormRequest

# Import project items/utils; some deployments ship the package as
# "App_Spd", others as "app_spider" — try the former, fall back to the
# latter. Catch only ImportError: a bare except would also swallow
# SystemExit/KeyboardInterrupt and hide real syntax errors in the package.
try:
    from App_Spd.items import AppSpdItem
    from App_Spd.utils import analysis, get_urlid, get_pubtime
except ImportError:
    from app_spider.items import AppSpdItem
    from app_spider.utils import analysis, get_urlid, get_pubtime

# Static request headers mimicking the Android app's okhttp client.
headers = {
    'Host': 'c.ahwanyun.cn',
    'Connection': 'Keep-Alive',
    'User-Agent': 'okhttp/3.8.0',
    'Accept-Encoding': 'gzip, deflate',
}


class HuangshanqurongmeiAppSpider(scrapy.Spider):
    """Spider for the Huangshan District rongmei (黄山区融媒) news app.

    Walks the first 5 pages of the channel-187 article list on
    c.ahwanyun.cn and yields one AppSpdItem per article detail page.
    """

    websiteId = '2141421716'  # numeric app id assigned to this source
    pubSource = '黄山区融媒'  # human-readable app/source name
    name = 'huangshanqurongmei_app'
    # {} placeholder is the 1-based page number of the list endpoint.
    list_url = 'https://c.ahwanyun.cn/api/plate/cmsPlateSection/AppGetList?channelId=187&userId=&pageNum={}&appId=95'
    # {} placeholder is the article id taken from the list response.
    detail_url = 'https://c.ahwanyun.cn/api/product/cmsContent/getContentDetails?dataObjId=1&id={}&appId=95'

    def start_requests(self):
        """Issue requests for list pages 1 through 5."""
        for page in range(1, 6):
            yield Request(url=self.list_url.format(page), headers=headers)

    def parse(self, response, **kwargs):
        """Parse one list page and schedule a detail request per article.

        Guards against a missing/empty ``rows`` array or a missing
        ``contentMessageVOList`` so a malformed API response logs a
        warning instead of raising KeyError/IndexError.
        """
        resp = response.json()
        rows = resp.get('rows') or []
        if not rows:
            self.logger.warning('empty "rows" in list response: %s', response.url)
            return
        for row in rows[0].get('contentMessageVOList') or []:
            yield Request(url=self.detail_url.format(row.get('id')), callback=self.parse_detail)

    def parse_detail(self, response):
        """Parse an article detail response into an AppSpdItem."""
        resp = response.json()
        data = resp.get('data') or {}  # all fields below come from 'data'

        item = AppSpdItem()
        # Reuse the class attributes instead of re-hardcoding the values;
        # the original literal was an int, so keep that type for callers.
        item['websiteId'] = int(self.websiteId)  # app id for this source
        item['pubSource'] = self.pubSource  # app display name
        item['title'] = data.get('title', '')
        item['author'] = data.get('createCpName', '')
        item['url'] = response.url
        item['urlid'] = get_urlid(item['url'])
        item['pubtime'] = data.get('createTime', '')
        item['content'] = analysis(data.get('articleDetails', ''))
        yield item
