# -*- coding: utf-8 -*-
# @Time    : 2019/12/18 15:21
# @Author  : Damn7Kx
# @Software: PyCharm
import datetime
import json
import random
from urllib.parse import unquote
import scrapy
from NewsSpider.items import NewsItem
from NewsSpider.tools.utils import Utils
from NewsSpider.tools.filter_time import Times
from NewsSpider.tools.parse_html import extract_html
from NewsSpider.tools.redis_db import Redis_DB
from w3lib.html import remove_tags


class DongFangNews(scrapy.Spider):
    '''
        东方头条 (DongFang Toutiao) news spider.

        Fetches article lists from the dftoutiao JSON API (one POST per
        channel in ``types``), pages by advancing ``pgnum``/``idx`` up to
        ``max_pages`` pages per channel, filters stale and duplicate
        articles, then visits each article's detail page and emits a
        ``NewsItem`` twice: once for the web URL and once for the mobile
        (app) URL.
    '''

    name = 'Dongfang'
    # Channel slugs accepted by the list API's ``type`` parameter.
    types = ['toutiao', 'redian', 'jinan', 'yule', 'shehui', 'tiyu', 'jiankang', 'caijing',
             'keji', 'junshi', 'guoji', 'xiaohua', 'lishi', 'qiche', 'qinggan', 'xingzuo',
             'nba', 'shishang', 'youxi', 'guonei', 'duanzi', 'kexue', 'hulianwang', 'shuma',
             'baojian', 'jianshen', 'yinshi', 'jianfei', 'cba', 'dejia', 'yijia', 'wangqiu',
             'zhongchao', 'xijia', 'yingchao', 'qipai', 'gaoerfu', 'paiqiu',
             'yumaoqiu', 'jiaju', 'waihui', 'baoxian', 'budongchan', 'huangjin', 'xinsanban',
             'gupiao', 'qihuo', 'jijin', 'licai', 'dianying', 'dianshi', 'bagua', 'sannong']

    # Pool of mobile user agents; a fresh one is chosen per request.
    Ua = [
        "Mozilla/5.0 (iPhone; CPU iPhone OS 12_1_4 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/12.0 Mobile/15E148 Safari/604.1",
        "Mozilla/5.0 (iPhone; CPU iPhone OS 12_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) CriOS/72.0.3626.101 Mobile/15E148 Safari/605.1",
        "Mozilla/5.0 (iPhone; CPU iPhone OS 12_1_4 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) FxiOS/15.0b13894 Mobile/16D57 Safari/605.1.15",
        "Mozilla/5.0 (iPhone; CPU iPhone OS 12_1_4 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) FxiOS/8.1.1 Mobile/16D57 Safari/605.1.15",
        "Mozilla/5.0 (iPhone; CPU iPhone OS 12_1_4 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) OPiOS/16.0.14.122053 Mobile/16D57 Safari/9537.53",
        "Mozilla/5.0 (iPhone; CPU iPhone OS 12_1_4 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) OPT/2 Mobile/16D57",
        "Mozilla/5.0 (iPhone; CPU iPhone OS 12_1_4 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) OPiOS/12.0.5.3 Version/7.0 Mobile/16D57 Safari/9537.53",
        "Mozilla/5.0 (iPhone; CPU iPhone OS 12_1_4 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/12.0 EdgiOS/42.10.3 Mobile/16D57 Safari/605.1.15",
        "Mozilla/5.0 (iPhone; CPU iPhone OS 12_1_4 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/16D57 unknown BingWeb/6.9.8.1",
    ]
    # Kept for backward compatibility with any external code reading
    # ``spider.headers``; per-request headers now come from _request_headers().
    headers = {
        "User-Agent": random.choice(Ua),
    }
    t = Times()
    count = 0
    redis = Redis_DB()

    # List-API endpoint (was previously duplicated inline in two places).
    api_url = 'https://refreshnews2.dftoutiao.com/TtNews/newsgzip'
    # Stop queuing further pages once ``pgnum`` exceeds this value.
    max_pages = 4
    # ``idx`` advances by this step per page (the API's page stride).
    page_step = 12

    custom_settings = {
        'DOWNLOAD_DELAY' : 0.5,
        'DOWNLOADER_MIDDLEWARES': {
            'NewsSpider.middlewares.ProxyIPMiddleware': 544,
            'scrapy.downloadermiddlewares.useragent.UserAgentMiddleware': None,
        },
        'ITEM_PIPELINES': {
            'NewsSpider.pipelines.KafkaPipeline': 544,
        }
    }

    def _request_headers(self):
        """Return request headers with a freshly randomized User-Agent."""
        return {"User-Agent": random.choice(self.Ua)}

    def _list_request(self, news_type, pgnum, idx):
        """Build the POST request for one page of a channel's article list.

        :param news_type: channel slug from ``types``
        :param pgnum: 1-based page number (sent as a string)
        :param idx: cumulative article offset (sent as a string)
        """
        payload = json.dumps({
            'type': news_type,
            'pgnum': str(pgnum),
            'idx': str(idx),
        })
        return scrapy.FormRequest(self.api_url, headers=self._request_headers(),
                                  body=payload, callback=self.parse_text,
                                  dont_filter=True,
                                  meta={"type": news_type, "pgnum": pgnum, "idx": idx})

    def start_requests(self):
        """Kick off page 1 (idx 0) of every channel."""
        for news_type in self.types:
            yield self._list_request(news_type, 1, 0)

    def parse_text(self, response):
        """Parse one list-API page: queue the next page and the detail pages."""
        print("正在访问起始页:", response.url)
        datas = json.loads(response.text)
        if not datas:
            return
        news_type = response.meta['type']
        pgnum = response.meta['pgnum']
        idx = response.meta['idx']
        # Queue the next page until the per-channel page limit is reached
        # (pages 1..max_pages each schedule their successor).
        if pgnum <= self.max_pages:
            yield self._list_request(news_type, pgnum + 1, idx + self.page_step)
        for d in datas['data']:
            url = d['url']
            if 'http' not in url:
                print(f"当前url: {url}已忽略")
                continue
            pubdate = str(self.t.datetimes(d['date']))
            if not self.t.time_is_Recent(pubdate):
                # Stale article — skip (previously a no-op ``yield None``).
                continue
            article_id = Utils.url_hash(url)
            # Redis-backed dedup: 0 means the id was seen before.
            if self.redis.check_exist_2("wenzhangquchong", article_id, '') == 0:
                print('该id:%s已存在' % article_id)
                continue
            meta = {
                'url': url,
                'appurl': d['appurl'],
                'id': article_id,
                'title': d['topic'],
                'pubdate': pubdate,
                'dataSource': '东方头条',
                # ``source`` is absent from some entries; explicit default
                # replaces the former bare ``except``.
                'author': d.get('source', ''),
            }
            # Fetch the article detail page.
            yield scrapy.Request(url=url, headers=self._request_headers(),
                                 callback=self.parse, dont_filter=True, meta=meta)

    def parse(self, response):
        """Extract the article body and emit web + mobile NewsItems."""
        try:
            content = extract_html(response.text)
        except Exception:
            # Best-effort extraction: an unparseable page yields empty content.
            content = ''
        item = NewsItem()
        item['id'] = response.meta['id']
        item['url'] = response.meta['url']
        item['title'] = response.meta['title']
        item['pubdate'] = response.meta['pubdate']
        item['content'] = content
        item['author'] = response.meta['author']
        item['formats'] = "web"
        item['dataSource'] = response.meta['dataSource']
        item['serchEnType'] = "东方头条"
        item['html'] = content
        item['updateTime'] = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        item['collectProcess'] = 'crawl_news'
        item['serverIp'] = "113.128.12.74"
        yield item
        # Mobile edition: copy before mutating — the original mutated the
        # already-yielded item, which could corrupt the web item while
        # pipelines were still processing it.
        mobile = item.copy()
        mobile['url'] = response.meta['appurl']
        mobile['id'] = Utils.url_hash(response.meta['appurl'])
        mobile['formats'] = "app"
        yield mobile