# -*- coding: utf-8 -*-
# @Time    : 2019/12/24 16:32
# @Author  : Damn7Kx
# @Software: PyCharm
import datetime
import json
import random
import scrapy
from NewsSpider.items import NewsItem
from NewsSpider.tools.utils import Utils
from NewsSpider.tools.filter_time import Times
from w3lib.html import remove_tags
from NewsSpider.tools.redis_db import Redis_DB

class ShengHuoQuanNews(scrapy.Spider):
    """Spider for the 生活圈 (ShengHuoQuan) news APP.

    For every city app-id in ``types`` it crawls each channel (clip) listing,
    follows up to 3 additional pages per channel, filters out stale and
    already-seen articles (Redis de-dup set ``wenzhangquchong``), then fetches
    the article body API and emits a ``NewsItem`` for the original URL and,
    when not yet seen, a second item for the mobile (``cuttURL``) variant.
    """
    name = 'Shenghuoquan'

    # app-id -> city name (reference only; requests are driven by ``types``)
    citys = {586432: '通辽', 763365: '乐亭', 965004: '燕郊', 1121332: '靖州', 1159336: '固安', 1519662: '滕州',
           1564395: '日照', 1564403: '济宁', 1564450: '聊城', 1564455: '泰安', 1564467: '沧州', 1564469: '秦皇岛',
           1564471: '扬州', 1564476: '安阳', 1564477: '焦作', 1564484: '新乡', 1564507: '潍坊', 1564508: '淄博',
           1564509: '信阳', 1564511: '南阳', 1564515: '唐山', 1564517: '商丘', 1564523: '菏泽',
           1564526: '临沂', 1564529: '邯郸', 1564533: '保定', 1564536: '驻马店', 1564540: '襄阳',
           1564543: '宜昌', 1564547: '郑州'}
    # app-id -> channel (clip) ids to crawl for that app
    types = {586432: [107077482, 104163227, 103552444, 103552349, 103552082, 103552384, 109013709, 105112471, 109899296, 103552398, 106502058, 103552383, 109899828, 109901759, 105014865, 109396237], 763365: [104598081, 104591175, 105049171, 109888755, 105049170, 107085154, 107085152, 107081094, 105043189, 108540740, 109901323, 105080538, 104598084, 105097673, 108546150, 109894982, 109900129, 109899240, 109899776], 965004: [109897265, 105641542, 109890911, 107601066, 109899191, 109901254, 109343766, 105641547, 107078218, 105989861, 106049072, 106853761, 109899476, 106120998, 105641554, 106038431], 1121332: [106476358, 106476372, 106477550, 109898497, 106477546, 106476359, 109896306, 106502486, 106477552, 109898681, 109898552, 109898557, 109898559, 109898607, 106477554], 1159336: [106721883, 106721886, 107079266, 1204243, 107090217, 106971865, 106770518, 106722912, 109892008, 106903929, 106722917, 106973308, 106726367, 109900876, 106775169, 106721905], 1519662: [109898603, 109890918, 109532955, 109532962, 109532957, 109532972, 109882431, 109606472, 109899154, 109894285, 109888172, 109892823, 109892506, 109606426, 109899954, 109899858, 109532952, 109532967], 1564395: [109898436, 109882165, 109882167, 109888048, 109882171, 109890905, 109882417, 109899744, 109890508, 109882176, 109882415, 109882164, 109882172], 1564403: [109885072, 109898355, 109890848, 109885078, 109885195, 109885074, 109887364, 109887368, 109899381, 109885081, 109896332, 109885216, 109896334, 109885069, 109885080], 1564450: [109899228, 109886160, 109886161, 109888813, 109886847, 109889631, 109886163, 109886855, 109886173, 109901286, 109899331, 109897405, 109900263, 109898846, 109886169, 109886167], 1564455: [109898509, 109886562, 109890891, 109890316, 109886612, 109886578, 109901146, 109886573, 109899366, 109887509, 109899144, 109886579, 109899292, 109886567, 109886571], 1564467: [109899181, 109892134, 109894268, 109892148, 109899398, 109892282, 109892154, 109892159, 109899141, 109894390, 109892281, 
109899804, 109892143, 109892138], 1564469: [109892167, 109892177, 109899261, 109894950, 109892180, 109892284, 109899194, 109892185, 109894878, 109892286, 109892170, 109892174], 1564471: [109899315, 109892190, 109892201, 109892293, 109894652, 109894781, 109892196, 109892203, 109892294, 109894472, 109892192, 109892195], 1564476: [109899256, 109894552, 109894557, 109894561, 109899149, 109894567, 109894646, 109895035, 109894562, 109894571, 109894570], 1564477: [109898762, 109894583, 109894591, 109896036, 109894599, 109894595, 109894650, 109895038, 109894608, 109899782, 109899151], 1564484: [109898841, 109894613, 109894623, 109894653, 109894619, 109894626, 109899137, 109894625, 109899632, 109895048, 109894634, 109900464, 109894614, 109894631], 1564507: [109896734, 109896740, 109896747, 109896730, 109896728, 109896729, 109896735, 109896742, 109900254, 109896748, 109896754], 1564508: [109900135, 109900138, 109900142, 109900155, 109900145, 109900150, 109900151, 109900247, 109900160, 109900147, 109900156], 1564509: [109900165, 109900168, 109900172, 109900182, 109900169, 109900186, 109900177, 109900456, 109900183, 109900191, 109900192], 1564511: [109900197, 109900198, 109900204, 109900208, 109900212, 109900215, 109900218, 109900220, 109900259, 109900490, 109900224, 109900228], 1564515: [109900264, 109900269, 109900277, 109900287, 109900274, 109900280, 109900279, 109900283, 109900679, 109900290, 109900293], 1564517: [109900295, 109900298, 109900311, 109900306, 109900301, 109900313, 109900307, 109900316, 109900803, 109900725, 109900455, 109900319], 1564523: [109900326, 109900350, 109900329, 109900339, 109900334, 109900353, 109900340, 109900345, 109900672, 109900357, 109900358], 1564526: [109900362, 109900387, 109900370, 109900372, 109900385, 109900380, 109900381, 109900376, 109900844, 109900674, 109901293, 109901310, 109900386, 109900365], 1564529: [109900392, 109900394, 109900398, 109901092, 109900408, 109900842, 109900405, 109900413, 109900406, 109900410, 109900669, 
109900417, 109900421], 1564533: [109900426, 109900429, 109900442, 109900433, 109900436, 109900440, 109900449, 109900664, 109900790, 109900446, 109900828, 109900450, 109900453], 1564536: [109900692, 109900697, 109900698, 109900700, 109900705, 109900710, 109900712, 109900713, 109901545, 109900722, 109900716, 109900720], 1564540: [109901183, 109901184, 109901186, 109901188, 109901193, 109901197, 109901199, 109901200, 109901268, 109901265, 109901205, 109901208], 1564543: [109901212, 109901216, 109901219, 109901221, 109901223, 109901225, 109901231, 109901505, 109901508, 109901226, 109901236, 109901240], 1564547: [109901341, 109901343, 109901344, 109901354, 109901346, 109901556, 109901374, 109901360, 109901357, 109901367, 109901349, 109901362, 109901366]}
    # Pool of mobile-browser user-agent strings; a fresh one is picked per request.
    Ua = [
        "Mozilla/5.0 (iPhone; CPU iPhone OS 12_1_4 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/12.0 Mobile/15E148 Safari/604.1",
        "Mozilla/5.0 (iPhone; CPU iPhone OS 12_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) CriOS/72.0.3626.101 Mobile/15E148 Safari/605.1",
        "Mozilla/5.0 (iPhone; CPU iPhone OS 12_1_4 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) FxiOS/15.0b13894 Mobile/16D57 Safari/605.1.15",
        "Mozilla/5.0 (iPhone; CPU iPhone OS 12_1_4 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) FxiOS/8.1.1 Mobile/16D57 Safari/605.1.15",
        "Mozilla/5.0 (iPhone; CPU iPhone OS 12_1_4 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) OPiOS/16.0.14.122053 Mobile/16D57 Safari/9537.53",
        "Mozilla/5.0 (iPhone; CPU iPhone OS 12_1_4 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) OPT/2 Mobile/16D57",
        "Mozilla/5.0 (iPhone; CPU iPhone OS 12_1_4 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) OPiOS/12.0.5.3 Version/7.0 Mobile/16D57 Safari/9537.53",
        "Mozilla/5.0 (iPhone; CPU iPhone OS 12_1_4 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/12.0 EdgiOS/42.10.3 Mobile/16D57 Safari/605.1.15",
        "Mozilla/5.0 (iPhone; CPU iPhone OS 12_1_4 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/16D57 unknown BingWeb/6.9.8.1",
    ]
    # Base headers template.  NOTE: the original code mutated headers['app']
    # in place before every request, which races between queued requests;
    # start_requests now takes a per-request copy instead of mutating this.
    headers = {
        "user-agent": random.choice(Ua),
        "Connection": "keep-alive",
        "app": "1564471",
        "device": "865547631807066",
    }

    # Listing API and article-body API endpoints.
    LIST_URL = 'https://api.zhiyueapp.cn/api/app/portalItemsNew'

    t = Times()           # recency filtering helper
    redis = Redis_DB()    # URL-hash de-duplication store

    custom_settings = {
        'DOWNLOADER_MIDDLEWARES': {
            'NewsSpider.middlewares.ProxyIPMiddleware': 544,
        },
        'ITEM_PIPELINES': {
            'NewsSpider.pipelines.KafkaPipeline': 544,
        }
    }

    def start_requests(self):
        """Seed one listing request (page 0) per (app-id, channel) pair."""
        for app_id, clip_ids in self.types.items():
            for clip_id in clip_ids:
                # Per-request headers copy: fixes the shared-state race on
                # 'app' and rotates the user-agent for every request.
                headers = dict(self.headers)
                headers['app'] = str(app_id)
                headers['user-agent'] = random.choice(self.Ua)
                params = {
                    'clipId': str(clip_id),
                    'offset': '0',
                    'note': '1',
                    'image': '0',
                    'sort': 'new'
                }
                yield scrapy.FormRequest(self.LIST_URL, headers=headers, formdata=params,
                                         callback=self.parse_text, dont_filter=True,
                                         meta={"params": params, "number": 1, "headers": headers})

    def _extract_article(self, entry):
        """Return the article payload dict and its title for a listing entry.

        Handles the two entry shapes ('article' and 'subjectArticle');
        returns (None, None) for any other type so the caller can skip it.
        """
        if entry['type'] == 'article':
            article = entry['article']
            return article, article['title']
        if entry['type'] == 'subjectArticle':
            article = entry['subject']['articleBvo']
            return article, article['socialShare']['title']
        return None, None

    def parse_text(self, response):
        """Parse one listing page: schedule the next page (up to 3 extra
        pages per channel) and one detail request per fresh, unseen article.
        """
        print("正在访问:", response.url)
        datas = json.loads(response.text)
        params = response.meta['params']
        number = response.meta['number']
        # Headers carried through meta so pagination keeps the right 'app';
        # fall back to the class template for requests scheduled before this fix.
        headers = response.meta.get('headers', self.headers)
        if number <= 3:
            params['offset'] = str(datas['next'])
            yield scrapy.FormRequest(self.LIST_URL, headers=headers, formdata=params,
                                     callback=self.parse_text, dont_filter=True,
                                     meta={"params": params, "number": number + 1, "headers": headers})
        for entry in datas['articles'] or []:
            article, title = self._extract_article(entry)
            if article is None:
                continue  # unknown entry type
            content_id = article['itemId']
            url = article['orgUrl']
            url2 = article['cuttURL']
            try:
                author = article['creator']['name']
            except (KeyError, TypeError):  # creator block absent or null
                author = ''
            url_id = Utils.url_hash(url)
            pubdate = Utils.process_timestamp(article['articleTime'])
            pubdate = str(self.t.datetimes(pubdate))
            if not self.t.time_is_Recent(pubdate):
                continue  # stale article
            if self.redis.check_exist_2("wenzhangquchong", url_id, '') == 0:
                print('该id:%s已存在' % url_id)
                continue  # already crawled
            detail_headers = {
                "User-Agent": random.choice(self.Ua),
                "device": "865547631807066"
            }
            detail_url = f'https://api.zhiyueapp.cn/api/article/body?articleId={content_id}&type=articleId&comments=1&sort=1'
            yield scrapy.Request(url=detail_url, headers=detail_headers, callback=self.parse,
                                 dont_filter=True,
                                 meta={'url': url, 'url2': url2, 'id': url_id,
                                       'title': title, 'pubdate': pubdate, 'author': author})

    def parse(self, response):
        """Build NewsItem(s) from the article-body API response.

        Emits one item for the original URL and, if the mobile (cuttURL)
        variant has not been seen yet, a second, copied item for it.
        """
        datas = json.loads(response.text)
        html = datas['content']
        item = NewsItem()
        item['id'] = response.meta['id']
        item['url'] = response.meta['url']
        item['title'] = response.meta['title']
        item['pubdate'] = response.meta['pubdate']
        item['content'] = remove_tags(html)
        item['author'] = response.meta['author']
        item['formats'] = "app"
        item['dataSource'] = "生活圈"
        item['serchEnType'] = "生活圈"
        item['html'] = html
        item['updateTime'] = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        item['collectProcess'] = 'crawl_news'
        item['serverIp'] = "113.128.12.74"
        yield item
        # Mobile variant: copy the item before mutating url/id — the original
        # code re-yielded the same instance, so pipelines still holding the
        # first item could observe the mobile-variant fields.
        m_url = response.meta['url2']
        m_id = Utils.url_hash(m_url)
        if self.redis.check_exist_2("wenzhangquchong", m_id, '') == 0:
            print('该id:%s已存在' % m_id)
        else:
            m_item = item.copy()
            m_item['url'] = m_url
            m_item['id'] = m_id
            yield m_item