# encoding: utf-8

import time
import uuid
import datetime
import re
import json

import scrapy
from scrapy import Request, FormRequest

try:
    from App_Spd.items import AppSpdItem
    from App_Spd.utils import analysis, get_urlid, get_pubtime
except:
    from app_spider.items import AppSpdItem
    from app_spider.utils import analysis, get_urlid, get_pubtime

# Default request headers for the app's terminal API (list_api below).
# NOTE(review): Content-Type declares form-encoding while the spider builds a
# JSON body, and this dict is never passed to any Request in this file —
# presumably the requests were meant to POST with these headers; confirm.
headers = {
    'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
    'User-Agent': 'Dalvik/2.1.0 (Linux; U; Android 5.1.1; OPPO R17 Build/NMF26X)',
    'Host': '112.27.85.130:81',
}


class ShitaikandianAppSpider(scrapy.Spider):
    """Spider for the Shitai Kandian ("石台看点") news app.

    POSTs JSON payloads to the app's terminal API to fetch channel
    listings and article details. Articles carrying an inline summary are
    yielded directly; articles that only reference an external (WeChat)
    page are fetched and parsed by :meth:`parse_wx`.
    """

    websiteId = '2141407495'  # app id used by downstream pipelines
    pubSource = '石台看点'  # human-readable source name
    name = 'shitaikandian_app'
    # Channel ids taken from the app's "news" menu.
    channels = [
        '771',  # 时政新闻 (politics)
        '772',  # 社会新闻 (society)
        '794',  # 融媒党建 (party building)
    ]
    list_api = 'http://112.27.85.130:81/app/terminal/terminal'

    def start_requests(self):
        """Request page 1 of every configured channel.

        The terminal API takes a JSON body, so the request must be a POST
        with the module-level headers; the original code sent a GET with a
        body and never used ``headers``, which servers typically ignore.
        """
        for chl in self.channels:
            for page in range(1, 2):  # first page only, by design
                payload = {
                    "categoryId": chl,
                    "currentPage": str(page),
                    "menuId": "news",
                    "mode": "content",
                    "pageSize": "15",
                    "terminal": "android",
                }
                yield Request(
                    url=self.list_api,
                    method='POST',
                    headers=headers,
                    body=json.dumps(payload),
                )

    def parse(self, response, **kwargs):
        """Parse a channel listing and request each article's detail."""
        resp = response.json()
        for itm in resp['content']['items']:
            payload = {
                "handleFlag": "1",
                "id": itm.get('id'),
                "menuId": "news",
                "mode": "summary",
                "terminal": "android",
            }
            yield Request(
                url=self.list_api,
                method='POST',
                headers=headers,
                body=json.dumps(payload),
                callback=self.parse_detail,
            )

    def _fill_from_summary(self, item, response, summary):
        """Populate url/urlid/content from an inline summary; return the item."""
        item['url'] = response.url
        item['urlid'] = get_urlid(item['url'])
        item['content'] = analysis(summary)
        return item

    def parse_detail(self, response):
        """Parse an article detail payload.

        Yields the item directly when the payload carries a summary;
        otherwise follows the first recommended link (a WeChat article)
        and lets :meth:`parse_wx` extract the body.
        """
        resp = response.json()
        item = AppSpdItem()
        item['title'] = resp['content']['title']
        item['author'] = resp['content']['author']
        item['pubtime'] = resp['content']['date']
        item['websiteId'] = int(self.websiteId)  # pipelines expect the numeric id
        item['pubSource'] = self.pubSource

        summary = resp['content'].get('summary')
        if summary:
            # Inline summary wins regardless of whether a link also exists.
            yield self._fill_from_summary(item, response, summary)
            return

        # No summary: fall back to the first recommended external link, if any.
        try:
            link_url = resp['content']['recommendItems'][0]['targetView']['linkUrl']
        except (KeyError, IndexError, TypeError):
            self.logger.debug('no summary and no recommend link for %s', response.url)
            return
        if link_url:
            item['url'] = link_url
            item['urlid'] = get_urlid(item['url'])
            yield Request(url=link_url, meta={"item": item}, callback=self.parse_wx)

    def parse_wx(self, response):
        """Extract the article body from a WeChat article page."""
        # NOTE: the trailing space inside the class attribute value is
        # deliberate -- WeChat's markup uses class="rich_media_content ".
        content = ''.join(response.xpath('//div[@class="rich_media_content "]').getall())
        item = response.meta['item']
        item['content'] = analysis(content)
        yield item



