# -*- coding: utf-8 -*-
"""
Created on Thu Mar 19 13:53:27 2020

@author: Administrator
"""

import urllib
import urllib.request
import re
from bs4 import BeautifulSoup
import time
import random
import json
import math

def _clean_field(info, key):
    """Return info[key] stripped of surrounding whitespace, newlines and tabs.

    Returns '' when the key is missing or the value is not a string
    (e.g. goldUser is presumably a boolean — TODO confirm against the API).
    """
    try:
        return info[key].strip().replace('\n', '').replace('\t', '')
    except (KeyError, AttributeError):
        return ""


# Four fields are collected per review: reviewer nickname, whether the
# reviewer is a super member, review time, and review content.
# Opened in append mode so repeated runs accumulate rows after the header.
with open("tm_fz_gn_1_1_1.txt", "a") as myfile:
    print("评论者昵称", "是否超级会员", "评论时间", "comment", sep='|', file=myfile)
    # Randomized per-review delay (seconds) to throttle requests; drawn once
    # and reused for every sleep, matching the original behavior.
    stop = random.uniform(0.5, 2)

    for i in range(3, 5):
        try:
            # Page index is encoded via the trailing number; the response is
            # JSONP (a callback wrapper around the JSON payload).
            url = 'https://rm.api.weibo.com/2/remind/push_count.json?trim_null=1&with_dm_group=true&with_settings=1&exclude_attitude=1&with_common_cmt=1&with_comment_attitude=1&with_common_attitude=1&with_moments=1&with_dm_unread=1&msgbox=true&with_page_group=1&with_chat_group=1&with_chat_group_notice=1&_pid=1&count=1&source=351354573&status_type=0&callback=STK_1584597081096' + str(2 * i + 1)
            req = urllib.request.Request(url)
            req.add_header('User-Agent', 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36')
            html = urllib.request.urlopen(req, timeout=500).read()
            html = bytes.decode(html, encoding="gbk")
            # Strip the JSONP callback wrapper: keep from {"rateDetail"... and
            # drop the trailing ')' so json.loads sees a bare object.
            js = re.search('{"rateDetail(.*)', html).group()[:-1]
            infos0 = json.loads(js)
            infos = infos0['rateDetail']['rateList']
            # BUG FIX: `lastpage` was referenced below but never defined, so
            # every review iteration raised NameError (silently swallowed by a
            # bare except) and no data was ever written. Read the total page
            # count from the paginator when present; 0 means "unknown".
            try:
                lastpage = infos0['rateDetail']['paginator']['lastPage']
            except (KeyError, TypeError):
                lastpage = 0
            tiaoshu = 0  # 1-based counter of reviews processed on this page
            for info in infos:
                try:
                    tiaoshu += 1
                    time.sleep(stop)
                    ss = "正在爬取第%d页的第%d条评论,共%d页" % (i, tiaoshu, lastpage)
                    print(ss)
                    user_name = _clean_field(info, 'displayUserNick')   # reviewer nickname
                    user_status = _clean_field(info, 'goldUser')        # super-member flag
                    comment_date = _clean_field(info, 'rateDate')       # review timestamp
                    comment = _clean_field(info, 'rateContent')         # review body
                    print(user_name, user_status, comment_date, comment, sep='|', file=myfile)
                except Exception:
                    sss = '爬取第%d页的第%d条评论失败,跳过爬取' % (i, tiaoshu)
                    print(sss)
        except Exception:
            # Network error, gbk decode failure, missing JSONP payload, or an
            # unexpected response shape — skip this page and continue.
            print("该产品url获取失败，请检查")




