# -*- coding: utf-8 -*-
# @Time    : 2019/12/12 10:38
# @Author  : Damn7Kx
# @Software: PyCharm
import datetime
import json
import random
from urllib.parse import urljoin
import scrapy
from NewsSpider.items import NewsItem
from NewsSpider.tools.utils import Utils
from NewsSpider.tools.filter_time import Times
from NewsSpider.tools.parse_html import extract_html
from NewsSpider.tools.redis_db import Redis_DB
from w3lib.html import remove_tags

class AiKanNews(scrapy.Spider):
    """Sohu news spider.

    Walks the Sohu "integration-api/mix/region" listing endpoint channel by
    channel (API returns JSON, no HTML parsing needed for the listing), then
    visits each article's detail page to extract the body text.  For every
    article two items are emitted: one for the web URL (formats="web") and
    one for the mobile m.sohu.com URL (formats="app").
    """

    name = 'Sohu'
    # Alternative Sohu APIs kept for reference:
    # https://api.k.sohu.com/api/channel/v6/news.go?num=20&page=1&channelId=13557
    # https://api.k.sohu.com/api/channel/v5/news.go?channelId=960590&num=20
    # http://api.k.sohu.com/api/news/v5/article.go?newsId=412072215&p1=NjYxMDcwNzE5NjMzMjYxMjM0NQ==
    # p1 is base64: decode it, change it arbitrarily, re-encode and it is accepted.
    base_url = 'http://v2.sohu.com/integration-api/mix/region/'
    origin = 'http://www.sohu.com'

    # Pool of mobile browser user agents; a fresh one is picked per request
    # by _headers() so the UA actually rotates during the crawl.
    Ua = [
        "Mozilla/5.0 (iPhone; CPU iPhone OS 12_1_4 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/12.0 Mobile/15E148 Safari/604.1",
        "Mozilla/5.0 (iPhone; CPU iPhone OS 12_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) CriOS/72.0.3626.101 Mobile/15E148 Safari/605.1",
        "Mozilla/5.0 (iPhone; CPU iPhone OS 12_1_4 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) FxiOS/15.0b13894 Mobile/16D57 Safari/605.1.15",
        "Mozilla/5.0 (iPhone; CPU iPhone OS 12_1_4 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) FxiOS/8.1.1 Mobile/16D57 Safari/605.1.15",
        "Mozilla/5.0 (iPhone; CPU iPhone OS 12_1_4 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) OPiOS/16.0.14.122053 Mobile/16D57 Safari/9537.53",
        "Mozilla/5.0 (iPhone; CPU iPhone OS 12_1_4 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) OPT/2 Mobile/16D57",
        "Mozilla/5.0 (iPhone; CPU iPhone OS 12_1_4 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) OPiOS/12.0.5.3 Version/7.0 Mobile/16D57 Safari/9537.53",
        "Mozilla/5.0 (iPhone; CPU iPhone OS 12_1_4 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/12.0 EdgiOS/42.10.3 Mobile/16D57 Safari/605.1.15",
        "Mozilla/5.0 (iPhone; CPU iPhone OS 12_1_4 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/16D57 unknown BingWeb/6.9.8.1",
    ]
    # NOTE(review): kept for backward compatibility with any external code
    # reading AiKanNews.headers.  The original picked ONE UA at import time
    # and reused it for the whole run; requests below use _headers() instead
    # so the pool above is actually rotated.
    headers = {
        "User-Agent": random.choice(Ua),
    }
    t = Times()           # time helpers: parsing + recency filter
    redis = Redis_DB()    # de-duplication store ("wenzhangquchong" set)

    custom_settings = {
        'DOWNLOADER_MIDDLEWARES': {
            'NewsSpider.middlewares.ProxyIPMiddleware': 544,
        },
        'ITEM_PIPELINES': {
            'NewsSpider.pipelines.KafkaPipeline': 544,
        }
    }

    def _headers(self):
        """Return request headers with a freshly rotated User-Agent."""
        return {"User-Agent": random.choice(self.Ua)}

    def start_requests(self):
        """Yield one listing request per channel id (1..191), 200 entries each."""
        for channel_id in range(1, 192):
            url = '{base_url}{i}?size={size}'.format(
                base_url=self.base_url, i=str(channel_id), size='200')
            yield scrapy.Request(url, headers=self._headers(),
                                 callback=self.parse_text, dont_filter=True)

    def parse_text(self, response):
        """Parse one channel listing (JSON) and schedule detail-page requests.

        Articles whose publish time is not recent (per Times.time_is_Recent)
        are skipped.  Listing metadata travels to parse() via request meta.
        """
        print("正在访问:", response.url)
        payload = json.loads(response.text)
        for entry in payload.get('data') or []:
            url = urljoin(self.origin, entry['url'])
            # publicTime is a timestamp; normalize it to a datetime string.
            pubdate = Utils.process_timestamp(entry['publicTime'])
            pubdate = str(self.t.datetimes(pubdate))
            if not self.t.time_is_Recent(pubdate):
                continue  # stale article — nothing to schedule
            meta = {
                'url': url,
                'id': Utils.url_hash(url),
                'title': entry['title'],
                'author': entry['authorName'],
                'pubdate': pubdate,
                # authorName doubles as the data source label downstream
                'dataSource': entry['authorName'],
            }
            yield scrapy.Request(url=url, headers=self._headers(),
                                 callback=self.parse, dont_filter=True,
                                 meta=meta)

    def parse(self, response):
        """Extract the article body and emit the web item plus a mobile twin.

        De-duplicates via redis before each emission; check_exist_2() == 0
        means the id has already been seen.
        """
        news_id = response.meta['id']
        if self.redis.check_exist_2("wenzhangquchong", news_id, '') == 0:
            print('该id:%s已存在' % news_id)
            return
        try:
            content = remove_tags(response.css("#mp-editor").extract_first())
        except (TypeError, AttributeError):
            # "#mp-editor" missing -> extract_first() is None and
            # remove_tags raises; fall back to generic extraction.
            content = extract_html(response.text)

        item = NewsItem()
        item['id'] = news_id
        item['url'] = response.meta['url']
        item['title'] = response.meta['title']
        item['pubdate'] = response.meta['pubdate']
        item['content'] = content
        item['author'] = response.meta['author']
        item['formats'] = "web"
        item['dataSource'] = response.meta['dataSource']
        item['serchEnType'] = "搜狐新闻"
        item['html'] = content
        item['updateTime'] = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        item['collectProcess'] = 'crawl_news'
        item['serverIp'] = "113.128.12.74"
        yield item

        # Mobile variant.  Anchor the replacement to the host so a path that
        # happens to contain "www" is not corrupted, and build a NEW item —
        # mutating the instance already handed to the pipeline is unsafe
        # because items may be processed asynchronously.
        m_url = response.meta['url'].replace("www.sohu.com", "m.sohu.com", 1)
        m_id = Utils.url_hash(m_url)
        if self.redis.check_exist_2("wenzhangquchong", m_id, '') == 0:
            print('该id:%s已存在' % m_id)
            return
        m_item = item.copy()
        m_item['formats'] = "app"
        m_item['url'] = m_url
        m_item['id'] = m_id
        yield m_item