# -*- coding: utf-8 -*-
# @Time    : 2019/12/19 14:43
# @Author  : Damn7Kx
# @Software: PyCharm
import datetime
import json
import random
import re
import time
import scrapy
from NewsSpider.items import NewsItem
from NewsSpider.tools.utils import Utils
from NewsSpider.tools.filter_time import Times
from NewsSpider.tools.parse_html import extract_html
from NewsSpider.tools.redis_db import Redis_DB
from w3lib.html import remove_tags


class UCNews(scrapy.Spider):
    """UC news feed spider (source: Juzi browser UC feed).

    The feed has no pagination: requesting a channel with the current
    timestamp always returns the newest articles.  Article bodies come from
    two different dayu.com content APIs depending on the id form; if the id
    cannot be extracted from the URL, the raw article page is parsed instead.
    """

    name = 'Uc'

    # Channel ids of the UC iflow feed.  '200' is the local-news channel and
    # must be requested once per city (see start_requests).
    types = ['90001', '200', '90004', '100', '10335', '179223212', '923258246', '1105405272',
             '1524583516', '32364874', '10001', '835729', '90003', '674534', '10000', '696724', '10005', '10006',
             '10007', '472933935', '589710362', '10008', '26325229', '169476544', '242677432', '1001932710',
             '1213442674',
             '1911322354', '1964289243', '1972619079', '1404457531635']

    # City names accepted by channel '200' as the `city_name` query parameter.
    citys = ['阿坝', '阿克苏', '阿拉尔', '阿勒泰', '澳门', '阿里', '安康', '安庆', '安顺', '安阳', '鞍山', '阿拉善盟', '巴中', '白城', '白沙', '白山', '白银',
             '百色', '蚌埠', '包头', '宝鸡', '保定', '保山', '保亭', '北海', '北京', '本溪', '毕节', '滨州', '亳州', '博乐', '博尔塔拉', '巴彦淖尔', '巴音郭楞',
             '重庆', '长春', '长沙', '长治', '沧州', '昌都', '昌吉', '昌江', '常德', '常州', '朝阳', '潮州', '郴州', '成都', '承德', '澄迈', '池州', '赤峰',
             '崇左', '滁州', '楚雄', '迪庆', '达州', '大理', '大连', '大庆', '大同', '大兴安岭', '丹东', '儋州', '德宏', '德阳', '德州', '定安', '定西',
             '东方',
             '东莞', '东营', '都匀', '鄂尔多斯', '鄂州', '恩施', '防城港', '佛山', '福州', '抚顺', '抚州', '阜新', '阜阳', '广州', '甘孜', '甘南', '赣州',
             '固原',
             '广安', '广元', '贵港', '贵阳', '桂林', '果洛', '哈尔滨', '哈密', '海北', '海东', '海口', '海西', '邯郸', '汉中', '杭州', '合肥', '合作',
             '和田',
             '河池', '河源', '菏泽', '贺州', '鹤壁', '鹤岗', '黑河', '衡水', '衡阳', '红河', '呼和浩特', '葫芦岛', '湖州', '怀化', '海南', '淮安', '淮北',
             '淮南',
             '黄冈', '黄南', '黄山', '黄石', '惠州', '呼伦贝尔', '鸡西', '吉安', '吉林', '吉首', '集宁', '济南', '济宁', '济源', '佳木斯', '嘉兴', '嘉峪关',
             '江门', '焦作', '揭阳', '金昌', '金华', '锦州', '晋城', '晋中', '荆门', '荆州', '景德镇', '景洪', '九江', '酒泉', '喀什', '开封', '凯里',
             '克拉玛依',
             '库尔勒', '昆明', '拉萨', '来宾', '莱芜', '兰州', '廊坊', '六盘水', '乐东', '乐山', '丽江', '丽水', '连云港', '凉山', '辽阳', '辽源', '聊城',
             '林芝',
             '临沧', '临汾', '临高', '临河', '临夏', '临沂', '陵水', '柳州', '六安', '陇南', '龙岩', '娄底', '泸州', '洛阳', '漯河', '吕梁', '马鞍山',
             '茂名',
             '眉山', '梅州', '绵阳', '牡丹江', '南京', '那曲', '南昌', '南充', '南宁', '南平', '南通', '南阳', '内江', '宁波', '宁德', '怒江', '攀枝花',
             '盘锦',
             '平顶山', '平凉', '萍乡', '莆田', '濮阳', '普洱', '七台河', '齐齐哈尔', '潜江', '钦州', '秦皇岛', '青岛', '清远', '庆阳', '琼海', '琼中', '衢州',
             '曲靖', '泉州', '黔南', '黔东南', '黔西南', '日喀则', '日照', '三门峡', '三明', '三亚', '山南', '汕头', '汕尾', '商洛', '商丘', '上海', '上饶',
             '韶关', '邵阳', '绍兴', '深圳', '神农架', '沈阳', '十堰', '石河子', '石家庄', '石嘴山', '双鸭山', '水城', '朔州', '四平', '松原', '苏州', '绥化',
             '随州', '遂宁', '宿迁', '宿州', '塔城', '台州', '太原', '泰安', '泰州', '唐山', '天津', '天门', '天水', '铁岭', '通化', '通辽', '铜川', '铜陵',
             '铜仁', '吐鲁番', '屯昌', '万宁', '威海', '潍坊', '渭南', '温州', '文昌', '文山', '乌海', '乌兰浩特', '乌鲁木齐', '无锡', '芜湖', '吴忠', '梧州',
             '五指山', '武都', '武汉', '武威', '乌兰察布', '厦门', '西安', '西宁', '锡林浩特', '仙桃', '咸宁', '咸阳', '香格里拉', '湘西', '湘潭', '襄阳',
             '孝感',
             '忻州', '新乡', '新余', '信阳', '邢台', '兴义', '徐州', '许昌', '宣城', '香港', '兴安盟', '西双版纳', '锡林郭勒', '营口', '雅安', '烟台', '延安',
             '延吉', '盐城', '扬州', '阳江', '阳泉', '伊春', '伊宁', '宜宾', '宜昌', '宜春', '益阳', '银川', '鹰潭', '永州', '榆林', '玉林', '玉树', '玉溪',
             '岳阳', '伊犁', '延边', '杨凌', '云浮', '运城', '枣庄', '湛江', '张家界', '张家口', '张掖', '漳州', '昭通', '肇庆', '镇江', '郑州', '中山',
             '中卫',
             '舟山', '周口', '珠海', '株洲', '驻马店', '资阳', '淄博', '自贡', '遵义']

    custom_settings = {
        # "LOG_LEVEL":'DEBUG',
        'DOWNLOADER_MIDDLEWARES': {
            'NewsSpider.middlewares.ProxyIPMiddleware': 544,
            'NewsSpider.middlewares.RandomUserAgentMiddleware': 543,
            'scrapy.downloadermiddlewares.useragent.UserAgentMiddleware': None,
        },
        'ITEM_PIPELINES': {
            'NewsSpider.pipelines.KafkaPipeline': 544,
        }
    }
    t = Times()          # recency filter / datetime normalizer
    count = 0
    redis = Redis_DB()   # dedup store keyed by article-url hash

    def start_requests(self):
        """Emit one feed request per channel; channel '200' is per-city."""
        for channel in self.types:
            if channel == '200':
                for city in self.citys:
                    timestamp = int(time.time() * 1000)
                    city_url = (
                        f'https://m.uczzd.cn/iflow/api/v1/channel/200'
                        f'?app=jiujiuliulanqih5-iflow&zzd_from=jiujiuliulanqih5-iflow'
                        f'&method=new&ftime={timestamp}&city_name={city}'
                    )
                    yield scrapy.Request(city_url, callback=self.parse_text, dont_filter=True)
            else:
                timestamp = int(time.time() * 1000)
                normal_url = (
                    f'https://m.uczzd.cn/iflow/api/v1/channel/{channel}'
                    f'?app=jiujiuliulanqih5-iflow&zzd_from=jiujiuliulanqih5-iflow'
                    f'&method=new&ftime={timestamp}'
                )
                yield scrapy.Request(normal_url, callback=self.parse_text, dont_filter=True)

    def parse_text(self, response):
        """Parse a channel feed: filter by recency, dedup, then fetch details.

        NOTE(review): ``data.articles`` is iterated and then indexed by its
        own keys, so it is assumed to be a dict keyed by article id — confirm
        against the live API response.
        """
        print("正在访问起始页:", response.url)
        datas = json.loads(response.text)
        if not datas:
            return
        articles = datas['data']['articles']
        for key in articles:
            article = articles[key]
            url = article['original_url']
            url2 = article['url']
            author = article.get('source_name', '')
            news_id = Utils.url_hash(url)
            pubdate = Utils.process_timestamp(article['publish_time'])
            pubdate = str(self.t.datetimes(pubdate))
            if not self.t.time_is_Recent(pubdate):
                continue  # stale article — skip
            if self.redis.check_exist_2("wenzhangquchong", news_id, '') == 0:
                print('该id:%s已存在' % news_id)
                continue  # already crawled — skip
            meta = {
                'url': url,
                'url2': url2,
                'id': news_id,
                'title': article['title'],
                'pubdate': pubdate,
                'dataSource': 'UC资讯',
                'author': author,
            }
            # Two content APIs depending on the article-id form; if the id
            # cannot be extracted, fall back to parsing the raw HTML page.
            # Fixed: the original pattern `wm_[c|a]id=` also matched a
            # literal '|' (i.e. "wm_|id="); [ca] is what was intended.
            match = re.search(r'wm_[ca]id=(.*?)!', url)
            if match is None:
                yield scrapy.Request(url=url, callback=self.parse_html,
                                     dont_filter=True, meta=meta)
            elif match.group(1).isdigit():
                yield scrapy.Request(
                    url=f'https://ff.dayu.com/contents/{match.group(1)}?biz_id=1002',
                    callback=self.parse, dont_filter=True, meta=meta)
            else:
                yield scrapy.Request(
                    url=f'https://ff.dayu.com/contents/origin/{match.group(1)}?biz_id=1002',
                    callback=self.parse, dont_filter=True, meta=meta)

    def _emit_items(self, meta, content, html):
        """Yield two NewsItems from one article: the web URL, then the same
        payload re-keyed under the in-app URL (meta['url2'])."""
        item = NewsItem()
        item['id'] = meta['id']
        item['url'] = meta['url']
        item['title'] = meta['title']
        item['pubdate'] = meta['pubdate']
        item['content'] = content
        item['author'] = meta['author']
        item['formats'] = "web"
        item['dataSource'] = meta['dataSource']
        item['serchEnType'] = "UC资讯"
        item['html'] = html
        item['updateTime'] = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
        item['collectProcess'] = 'crawl_news'
        item['serverIp'] = "113.128.12.74"
        yield item
        item['url'] = meta['url2']
        item['id'] = Utils.url_hash(meta['url2'])
        item['formats'] = "app"
        yield item

    def parse(self, response):
        """Parse a dayu.com content-API (JSON) response into items."""
        html = json.loads(response.text)['data']['body']['text']
        yield from self._emit_items(response.meta, remove_tags(html), html)

    def parse_html(self, response):
        """Fallback: extract the article body from the raw HTML page."""
        html = extract_html(response.text)
        yield from self._emit_items(response.meta, html, html)

