# encoding: utf-8

import time
import uuid
import datetime
import re
import json

import scrapy
from scrapy import Request, FormRequest

# Import project items/utils from whichever package layout is present.
# Only ImportError should trigger the fallback; a bare `except:` would also
# hide real errors (e.g. a SyntaxError inside App_Spd) behind a second,
# confusing ImportError from the fallback path.
try:
    from App_Spd.items import AppSpdItem
    from App_Spd.utils import analysis, get_urlid, get_pubtime, timestamp_now_to
except ImportError:
    from app_spider.items import AppSpdItem
    from app_spider.utils import analysis, get_urlid, get_pubtime, timestamp_now_to


class MeilishuchengAppSpider(scrapy.Spider):
    """Spider for the '美丽舒城' (Meili Shucheng) news app.

    Walks a fixed set of channels on www.scgdj.com, requesting the first
    nine pages of each channel's JSON list API, and yields one
    ``AppSpdItem`` per article record.
    """

    websiteId = '2141421707'  # app id assigned to this source
    pubSource = '美丽舒城'  # display name of this app/source
    name = 'meilishucheng_app'

    channels = [
        'ywsd',  # politics / current affairs
        'zhxw',  # general news
        'xiangzhenjujiao',  # townships
        'tzgg',  # notices and announcements
        'wmksc',  # higher-level media on Shucheng
    ]
    # {} placeholders: channel code, 1-based page number.
    list_api = 'http://www.scgdj.com/apps/index.php?id=xinwen&cate={}&pageid={}'
    # {} placeholder: article id, used when a record carries no URL.
    detail_url = 'http://www.scgdj.com/apps/index.php?id={}'

    def start_requests(self):
        """Request pages 1-9 of every channel's list API."""
        for chl in self.channels:
            for page in range(1, 10):
                yield Request(url=self.list_api.format(chl, page))

    def parse(self, response, **kwargs):
        """Parse one JSON list-API response and yield an item per article.

        Expects the response body to be JSON shaped like
        ``{"content": {"rs_lists": [...]}}`` — TODO confirm against a live
        response; a missing key raises KeyError and fails the page.
        """
        resp = response.json()
        for rs in resp['content']['rs_lists']:
            item = AppSpdItem()
            item['title'] = rs.get('title', '')
            item['author'] = rs.get('writer', '')

            # Some records have no absolute URL; rebuild one from the id.
            url = rs.get('url')
            if not url:
                url = self.detail_url.format(rs.get('id'))
            item['url'] = url
            item['urlid'] = get_urlid(item['url'])
            item['pubtime'] = timestamp_now_to(rs.get('dateline'))
            # Bug fix: the original replaced '&lt' (missing the trailing ';'),
            # which turned '&lt;' into '<;'. Unescape both entities fully.
            # `or ''` also guards against an explicit null 'content' value,
            # which .get('content', '') would pass through as None.
            raw_content = (rs.get('content') or '').replace('&lt;', '<').replace('&gt;', '>')
            item['content'] = analysis(raw_content)
            # Reuse the class attributes instead of duplicating the literals;
            # int() keeps the field the same integer the original hard-coded.
            item['websiteId'] = int(self.websiteId)
            item['pubSource'] = self.pubSource
            yield item
