# -*- coding: utf-8 -*-
import json
import time
import scrapy
import hashlib
import random
from MeiHaoTong.user_agent import Android

# Maps a category display name (Chinese) to the site classification id
# that is stored on each yielded item (see JrHotSpider.parse).
cate_info = {
    '育儿': '30',
    '正能量': '139',
    '孕产': '138',
    '教育': '137',
    '手机': '136',
    '养生': '135',
    '三农': '134',
    '搞笑': '133',
    '星座': '132',
    '文化': '131',
    '数码': '130',
    '情感': '127',
    '科学': '126',
    '科技': '125',
    '股票': '124',
    '动漫': '123',
    '宠物': '121',
    '游戏': '29',
    '娱乐': '31',
    '旅游': '32',
    '汽车': '33',
    '家居': '34',
    '美食': '35',
    '军事': '38',
    '历史': '39',
    '健康': '40',
    '体育': '42',
    '财经': '119',
    '彩票': '120',
    '传媒': '122',
}
# Maps a Toutiao list-API `tag` value to its category display name (Chinese);
# the display name is then resolved to an id through cate_info.
# NOTE(review): '宠物' and '彩票' use the Chinese name itself as the tag,
# unlike every other entry — confirm these are tags the API accepts.
cate_id = {
    '宠物': '宠物',
    '彩票': '彩票',
    'news_finance': '财经',
    'news_sports': '体育',
    'news_health': '健康',
    'media': '传媒',
    'news_game': '游戏',
    'news_military': '军事',
    'news_food': '美食',
    'news_entertainment': '娱乐',
    'news_car': '汽车',
    'news_travel': '旅游',
    'news_baby': '育儿',
    'news_history': '历史',
    'positive': '正能量',
    'pregnancy': '孕产',
    'news_edu': '教育',
    'cellphone': '手机',
    'news_regimen': '养生',
    'news_agriculture': '三农',
    'funny': '搞笑',
    'news_comic': '动漫',
    'news_culture': '文化',
    'news_tech': '科技',
    'digital': '数码',
    'science_all': '科学',
    'stock': '股票',
    'news_astrology': '星座',
}

class JrHotSpider(scrapy.Spider):
    """Crawl Toutiao's mobile list API per category and yield article items.

    Flow: start_requests issues one seed request; get_url computes the
    `as`/`cp` anti-crawl signature parameters and fans out one list request
    per tag in cate_id; parse filters and normalizes each article entry.
    """
    name = 'jrtt'
    # Seed URL — its response body is unused; it only triggers get_url.
    url = 'https://www.toutiao.com/api/article/user_log/?c=toutiao_desc&sid=g25fyfrev1515496636120&type=pageview&t=1551687734217'

    def start_requests(self):
        # Single seed request; the short sleep paces the crawl start.
        time.sleep(0.5)
        yield scrapy.Request(self.url, callback=self.get_url)

    def get_url(self, response):
        """Build the `as`/`cp` signature and request every category list.

        The scheme interleaves the hex digits of the current unix time with
        digits of that time's MD5 hex digest (Toutiao's historical "ascp"
        parameters).
        """
        now = round(time.time())
        e = hex(int(now)).upper()[2:]  # hex timestamp without the '0x' prefix
        digest = hashlib.md5(str(int(now)).encode()).hexdigest().upper()
        if len(e) != 8:
            # The interleave below indexes e[0..7], so fall back to
            # known-good constants when the hex timestamp is not 8 chars.
            # BUG FIX: the original did `return zz` here — inside a generator
            # that only ends iteration, so the fallback signature was never
            # used to build any request. Now both branches fall through to
            # the per-category fan-out.
            zz = {'as': "A1F5FB4CA4435D9",
                  'cp': "5BC433C51DD94E1"}
        else:
            head = digest[:5]
            tail = digest[-5:]
            # (Renamed the md5 digest from `i`: the original shadowed it
            # with the loop index, which only worked by accident.)
            s = ''.join(head[k] + e[k] for k in range(5))
            r = ''.join(e[k + 3] + tail[k] for k in range(5))
            zz = {
                'as': "A1" + s + e[-3:],
                'cp': e[0:3] + r + "E1",
            }
        for tag, cate in cate_id.items():
            urls = 'https://m.toutiao.com/list/?tag={}&ac=wap&count=20&format=json_raw&as={}&cp={}&min_behot_time=0&_signature=6uiYcQAAsTdAx9jxmXioXuromG&i='
            urls = urls.format(tag, zz['as'], zz['cp'])
            yield scrapy.Request(urls,
                                 headers={'User-Agent': random.choice(Android)},
                                 callback=self.parse,
                                 meta={'cate': cate})

    def parse(self, response):
        """Filter the JSON article list and yield one dict per kept article."""
        # `data` may be absent/null — guard so one bad response doesn't crash.
        res = json.loads(response.text).get('data') or []
        for info in res:
            # Drop anything that carries video in either flag.
            if info.get('has_video'):
                continue
            if int(info.get('has_mp4_video') or 0) != 0:
                continue
            item = {}
            item['title'] = info.get('title')
            item['url'] = info.get('url')
            # Skip entries without a URL, and WeChat-hosted articles.
            if not item['url'] or 'mp.weixin.qq.com' in item['url']:
                continue
            timestamp = info.get('publish_time')
            item['publish_time'] = time.strftime('%Y-%m-%d %X', time.localtime(timestamp))
            item['author'] = info.get('source')
            item['comment_count'] = info.get('comment_count') or 0  # None-safe
            item['site_classify_id'] = cate_info.get(response.meta['cate'])
            item['site_id'] = 3  # 3 marks a Toutiao (头条号) source
            # repin_count stands in for a view count (None-safe).
            browse_count = info.get('repin_count') or 0
            # Inflate views when comments are disproportionately high.
            if browse_count / 3 <= item['comment_count']:
                browse_count += item['comment_count'] * 99
            if browse_count < 100:  # floor the view count at a default
                browse_count = 586
            item['browse_count'] = browse_count
            # Blacklisted authors and a blacklisted URL substring.
            if item['author'] == 'BTV养生堂' or item['author'] == '我就是来标标的':
                continue
            if 'bbonfire' in item['url']:
                continue
            yield item


