# 循环取redis中获取的url

import redis
import requests
import json
from kafka import KafkaProducer
import re
import time

# Kafka producer used to publish scraped records; bootstraps against the
# three cluster brokers (master/node1/node2). No value_serializer is
# configured, so every send() below must pass bytes.
produce = KafkaProducer(bootstrap_servers="master:9092,node1:9092,node2:9092")

# Redis connection: serves the "search_urls" work queue, the per-URL
# "flag_<url>" sentiment ids, and the "comment_distinct_flag" dedup set.
client = redis.Redis(host="master", port=6379)

# Startup banner ("crawler started").
print("==========爬虫已启动==========")

# ============================================================
# Main crawl loop: block on the redis "search_urls" queue, scrape
# each Weibo search-results page, and publish posts, users and
# comments to their respective Kafka topics.
# ============================================================

# Compile the HTML-tag-stripping regex ONCE, outside the loop (it was
# recompiled per card before). Raw string so `\w` is not an invalid
# escape sequence.
re_h = re.compile(r'</?\w+[^>]*>')

while True:

    # Throttle between iterations to avoid tripping anti-crawler limits.
    time.sleep(3)

    # timeout=0 blocks forever until a URL is available. brpop pops from
    # the right end, so with an lpush producer this is FIFO.
    # (renamed from `tuple`, which shadowed the builtin)
    popped = client.brpop("search_urls", timeout=0)

    # brpop returns a (key, value) pair of bytes; decode the URL.
    url = popped[1].decode("utf-8")

    # Sentiment/topic id the producer attached to this URL. The key may be
    # missing or expired — skip instead of crashing on None.decode().
    sent_id_raw = client.get("flag_" + url)
    if sent_id_raw is None:
        print("missing flag_ key, skip: " + url)
        continue
    sent_id = sent_id_raw.decode("utf-8")

    # Progress output ("crawling: <url>").
    print("正在爬取：" + url)

    # Fetch the search page and parse its JSON body.
    result = requests.get(url)
    js = json.loads(result.text)

    # `cards` is the list of result entries. Guard against responses with
    # no data (rate-limited or empty pages) — same condition the comment
    # path below already guards for.
    for card in js.get("data", {}).get("cards", []):
        # One weibo post plus its author.
        mblog = card["mblog"]

        # ---------- post fields ----------
        create_date = card["created_at"]            # post creation time
        mblog_id = mblog["id"]                      # post id (renamed from `id`, a builtin)
        mid = mblog["mid"]
        # Post text with HTML tags stripped.
        text = re_h.sub("", mblog["text"])
        source = mblog["source"]                    # client the post was sent from
        reposts_count = mblog["reposts_count"]      # repost count
        comments_count = mblog["comments_count"]    # comment count
        attitudes_count = mblog["attitudes_count"]  # like count

        # ---------- author fields ----------
        user = mblog["user"]
        user_id = user["id"]                        # unique author id (hbase rowkey)
        user_name = user["screen_name"]
        gender = user["gender"]
        description = user["description"]
        follow_count = user["follow_count"]         # accounts the author follows
        followers_count = user["followers_count"]   # author's follower count

        # ---------- publish post record (topic "wz") ----------
        line = "%s|%s|%s|%s|%s|%s|%s" % (
            mblog_id, sent_id, user_id, source,
            reposts_count, comments_count, attitudes_count)
        produce.send(topic="wz", value=line.encode("utf-8"))
        produce.flush()

        # ---------- publish author record (topic "user") ----------
        user_str = "%s|%s|%s|%s|%s|%s" % (
            user_id, user_name, gender, description,
            follow_count, followers_count)
        produce.send(topic="user", value=user_str.encode("utf-8"))
        produce.flush()

        # ---------- fetch hot comments for this post ----------
        # %s formatting also works when the ids deserialize as ints,
        # unlike the previous str.replace("$1", id) approach (which
        # raised TypeError on non-str ids).
        comment_url = (
            "https://m.weibo.cn/comments/hotflow?id=%s&mid=%s&max_id_type=0"
            % (mblog_id, mid)
        )
        comment_result = requests.get(comment_url)
        comment_json = json.loads(comment_result.text)
        print(comment_json)

        # When crawling too fast, or the post has no comments, "data" is
        # missing from the response — skip in that case.
        if "data" in comment_json and "data" in comment_json["data"]:
            for comment in comment_json["data"]["data"]:
                # ---------- comment fields ----------
                created_at = comment["created_at"]
                comment_id = comment["id"]
                comment_text = comment["text"]
                like_count = comment["like_count"]

                # ---------- commenter fields ----------
                commenter = comment["user"]
                commenter_id = commenter["id"]
                commenter_name = commenter["screen_name"]
                commenter_gender = commenter["gender"]
                commenter_desc = commenter["description"]
                commenter_follow = commenter["follow_count"]
                commenter_followers = commenter["followers_count"]

                # ---------- publish commenter record (topic "user") ----------
                user_str1 = "%s|%s|%s|%s|%s|%s" % (
                    commenter_id, commenter_name, commenter_gender,
                    commenter_desc, commenter_follow, commenter_followers)
                # BUG FIX: this send previously passed a str; with no
                # value_serializer configured, KafkaProducer requires bytes
                # (every other send in this file already encodes).
                produce.send(topic="user", value=user_str1.encode("utf-8"))
                produce.flush()

                # ---------- publish comment record (topic "comment") ----------
                comment_str = "%s|%s|%s|%s|%s|%s" % (
                    comment_id, sent_id, created_at, like_count,
                    commenter_id, comment_text)
                # De-duplicate comments across runs via a redis set.
                if not client.sismember("comment_distinct_flag", comment_id):
                    produce.send(topic="comment", value=comment_str.encode("utf-8"))
                    produce.flush()
                    client.sadd("comment_distinct_flag", comment_id)