""""
爬虫模块

1、爬取的关键字不能固定

2、爬虫要一直运行 比如每隔几分钟爬取一次
怎么启动多个爬虫爬取数据

3、数据实时保存到kafka中

kafka-topics.sh --create --zookeeper master:2181,node1:2181,node2:2181 --replication-factor 1 --partitions 3 --topic wz
kafka-topics.sh --create --zookeeper master:2181,node1:2181,node2:2181 --replication-factor 1 --partitions 3 --topic user
kafka-topics.sh --create --zookeeper master:2181,node1:2181,node2:2181 --replication-factor 1 --partitions 3 --topic comment




kafka-console-consumer.sh --zookeeper   master:2181,node1:2181,node2:2181   --from-beginning --topic wz
kafka-console-consumer.sh --zookeeper   master:2181,node1:2181,node2:2181   --from-beginning --topic user
kafka-console-consumer.sh --zookeeper   master:2181,node1:2181,node2:2181   --from-beginning --topic comment

"""

import json
import re
import sys
import time
from urllib import parse

import requests
from kafka import KafkaProducer

# Create the Kafka producer shared by every send below.
produce = KafkaProducer(bootstrap_servers="master:9092,node1:9092,node2:9092")

# Search keyword — overridable from the command line so the crawler is not
# tied to one fixed term; defaults to the original hard-coded keyword when
# no argument is given (backward compatible).
wd = sys.argv[1] if len(sys.argv) > 1 else "罗志祥"
# Percent-encode the (possibly Chinese) keyword for use inside the URL.
wd = parse.quote(wd)

# Search-result API URL template: $1 = encoded keyword, $2 = page number.
url = "https://m.weibo.cn/api/container/getIndex?" \
      "containerid=100103type%3D60%26q%3D$1%26t%3D0&page_type=searchall&page=$2"

# Fill in the keyword slot; the page slot is filled per request in the loop.
url = url.replace("$1", wd)

# ---------------------------------------------------------------------------
# Main crawl loop: fetch 5 pages of search results, push post / author /
# comment records to their Kafka topics, sleeping between pages to throttle.
# ---------------------------------------------------------------------------

# Compile the HTML-tag-stripping regex once (raw string; previously it was
# re-compiled for every single card inside the loop).
re_h = re.compile(r'</?\w+[^>]*>')

for page in range(5):
    # Substitute the page number into the $2 slot and fetch one result page.
    result = requests.get(url.replace("$2", str(page)))

    # Parse the response body as JSON.
    js = json.loads(result.text)

    # "cards" is the array of result entries; default to [] so a page with no
    # results (or an error payload) is skipped instead of raising KeyError.
    for card in js.get("data", {}).get("cards", []):
        # Non-post cards (banners, topic headers, ads) have no "mblog" — skip.
        mblog = card.get("mblog")
        if mblog is None:
            continue

        ############## post (weibo article) fields ##############
        create_date = mblog["created_at"]        # post creation time (currently not sent to Kafka)
        mblog_id = mblog["id"]                   # post id (was `id`, which shadowed the builtin)
        mid = mblog["mid"]
        # Post body with HTML tags stripped (currently not sent to Kafka).
        text = re_h.sub("", mblog["text"])
        source = mblog["source"]                 # client the post was sent from
        reposts_count = mblog["reposts_count"]   # repost count
        comments_count = mblog["comments_count"] # comment count
        attitudes_count = mblog["attitudes_count"]  # like count

        ############## post author fields ##############
        user = mblog["user"]
        user_id = user["id"]                 # unique author id (HBase rowkey candidate)
        user_name = user["screen_name"]      # display name
        gender = user["gender"]              # gender flag
        description = user["description"]    # profile bio
        follow_count = user["follow_count"]  # accounts the author follows
        followers_count = user["followers_count"]  # follower count

        ############## post record -> kafka topic "wz" ##############
        # Pipe-separated record; one Kafka topic per record type.
        line = "%s|%s|%s|%s|%s|%s" % (
            mblog_id, user_id, source, reposts_count, comments_count, attitudes_count)
        produce.send(topic="wz", value=line.encode("utf-8"))
        produce.flush()

        ############## author record -> kafka topic "user" ##############
        user_str = "%s|%s|%s|%s|%s|%s" % (
            user_id, user_name, gender, description, follow_count, followers_count)
        produce.send(topic="user", value=user_str.encode("utf-8"))
        produce.flush()

        ############## crawl this post's comments ##############
        # Comment endpoint is keyed by the post's id and mid; str() guards
        # against numeric ids (str.replace requires str arguments).
        comment_url = "https://m.weibo.cn/comments/hotflow?id=$1&mid=$2&max_id_type=0"
        comment_result = requests.get(
            comment_url.replace("$1", str(mblog_id)).replace("$2", str(mid)))
        comment_json = json.loads(comment_result.text)

        # When a post has no comments the endpoint returns "data" as a
        # non-dict (or omits it); guard so one such post doesn't crash the
        # whole crawler with a KeyError/TypeError.
        comment_data = comment_json.get("data")
        if not isinstance(comment_data, dict):
            continue

        for comment in comment_data.get("data", []):
            ############## comment fields ##############
            created_at = comment["created_at"]   # comment creation time
            comment_id = comment["id"]           # comment id
            comment_text = comment["text"]       # comment body (raw HTML)
            like_count = comment["like_count"]   # like count

            ############## commenter fields ##############
            user1 = comment["user"]
            user_id1 = user1["id"]               # commenter id
            user_name1 = user1["screen_name"]    # display name
            gender1 = user1["gender"]            # gender flag
            description1 = user1["description"]  # profile bio
            follow_count1 = user1["follow_count"]        # accounts followed
            followers_count1 = user1["followers_count"]  # follower count

            ############## commenter record -> kafka topic "user" ##############
            user_str1 = "%s|%s|%s|%s|%s|%s" % (
                user_id1, user_name1, gender1, description1, follow_count1, followers_count1)
            produce.send(topic="user", value=user_str1.encode("utf-8"))
            produce.flush()

            ############## comment record -> kafka topic "comment" ##############
            comment_str = "%s|%s|%s|%s|%s" % (
                comment_id, created_at, like_count, user_id1, comment_text)
            produce.send(topic="comment", value=comment_str.encode("utf-8"))
            produce.flush()

    # Throttle between pages — the crawler is meant to run slowly/repeatedly.
    time.sleep(1000)

produce.close()
