import json

import scrapy
from urllib import parse
from WeiBoScrapy.items import WeiBoArticleItem, UserItem, CommentItem
from fake_headers import Headers
import redis


class WeiBoSpider(scrapy.Spider):
    """Scrape Weibo search results for a fixed topic.

    Crawls up to ``page`` pages of the mobile search API, yielding
    ``WeiBoArticleItem``, ``UserItem`` and ``CommentItem`` objects.
    Redis sets ("weiboArticleIDs", "userIDs", "commentIDs") are used to
    de-duplicate articles, users and comments across requests.
    """

    # Spider name (used by `scrapy crawl`)
    name = "WeiBoSpider"
    # Number of search-result pages to crawl
    page = 100
    # Search keyword (URL-quoted before use)
    topic = "新疆棉花"
    allowed_domains = ["weibo.cn"]
    # Article search API; format args: quoted topic, page number
    base_url = "https://m.weibo.cn/api/container/getIndex?containerid=100103type%3D1%26q%3D{}&page_type=searchall&page={}"
    # Comment API; format args: article id, mid
    comment_base_url = "https://m.weibo.cn/comments/hotflow?id={}&mid={}&max_id_type=0"

    def __init__(self):
        # Random request-header generator: Chrome UA, Windows platform,
        # plus miscellaneous headers — reduces the chance of being blocked.
        self.header = Headers(
            browser="chrome",
            os="win",
            headers=True
        )
        # Shared connection pool for all Redis lookups.
        # Fix: port must be an int — the original passed the string "6379".
        self.redisPool = redis.ConnectionPool(host="master", port=6379)

    def start_requests(self):
        """Entry point: schedule one search-API request per result page."""
        for page_no in range(self.page):
            url = self.base_url.format(parse.quote(self.topic, encoding="utf8"), page_no)
            yield scrapy.Request(url=url, callback=self.articleParse, headers=self.header.generate())

    def articleParse(self, response):
        """Parse one search-result page.

        Yields a ``WeiBoArticleItem`` and (if unseen) a ``UserItem`` per
        article, and schedules a comment request for every article found.
        """
        jsonObj = json.loads(response.text)
        # Borrow a connection from the shared pool.
        redisConn = redis.StrictRedis(connection_pool=self.redisPool)
        data = jsonObj.get("data")
        if data is None:
            return
        cards = data.get("cards")
        if cards is None:
            return
        for card in cards:
            mblog = card.get("mblog")
            if mblog is None:
                continue
            user = mblog["user"]
            # Renamed from `id`: avoid shadowing the builtin.
            article_id = mblog["id"]
            mid = mblog["mid"]
            # Emit the article only the first time we see its ID.
            if not redisConn.sismember("weiboArticleIDs", article_id):
                redisConn.sadd("weiboArticleIDs", article_id)
                # Fix: the original re-added the same ID to the set a second
                # time here — the duplicate sadd has been removed.
                weiBoArticle = WeiBoArticleItem()
                weiBoArticle["id"] = article_id
                weiBoArticle["comments_count"] = mblog["comments_count"]
                weiBoArticle["created_at"] = mblog["created_at"]
                weiBoArticle["source"] = mblog["source"]
                weiBoArticle["reposts_count"] = mblog["reposts_count"]
                weiBoArticle["attitudes_count"] = mblog["attitudes_count"]
                weiBoArticle["text"] = mblog["text"]
                weiBoArticle["user_id"] = user["id"]
                yield weiBoArticle

            # Emit the author only the first time we see their ID.
            user_id = user["id"]
            if not redisConn.sismember("userIDs", user_id):
                redisConn.sadd("userIDs", user_id)
                yield self.constructUserItem(user)

            # Schedule the comment crawl for this article; the article ID is
            # forwarded to commentParse via request meta. Headers are
            # randomized per request via fake_headers.
            comment_url = self.comment_base_url.format(article_id, mid)
            yield scrapy.Request(url=comment_url, callback=self.commentParse,
                                 meta={"weiboID": article_id},
                                 headers=self.header.generate())

    def commentParse(self, response):
        """Parse the hot-comment feed for one article.

        Yields a ``CommentItem`` per unseen comment, plus a ``UserItem``
        for each unseen commenter.
        """
        # Article ID forwarded from articleParse via request meta.
        weiBoId = response.meta.get("weiboID")
        jsonObj = json.loads(response.text)
        data = jsonObj.get("data")
        redisConn = redis.StrictRedis(connection_pool=self.redisPool)
        if data is None:
            return
        # NOTE: the comment list itself lives under a nested "data" key.
        comments = data.get("data")
        if comments is None:
            return
        for comment in comments:
            comment_id = comment["id"]
            # Emit the comment only the first time we see its ID.
            if not redisConn.sismember("commentIDs", comment_id):
                redisConn.sadd("commentIDs", comment_id)
                commentItem = CommentItem()
                commentItem["comment_id"] = comment_id
                commentItem["comment_time"] = comment["created_at"]
                commentItem["like_count"] = comment["like_count"]
                commentItem["comment_text"] = comment["text"]
                user = comment["user"]
                user_id = user["id"]
                commentItem["user_id"] = user_id
                commentItem["weibo_id"] = weiBoId
                yield commentItem
                # Emit the commenter only the first time we see their ID.
                if not redisConn.sismember("userIDs", user_id):
                    redisConn.sadd("userIDs", user_id)
                    yield self.constructUserItem(user)

    def constructUserItem(self, user):
        """Build a UserItem from a raw user dict.

        Shared by articleParse and commentParse, which both need to emit
        user records.
        """
        userItem = UserItem()
        userItem["id"] = user["id"]
        userItem["gender"] = user["gender"]
        userItem["screen_name"] = user["screen_name"]
        userItem["followers_count"] = user["followers_count"]
        userItem["follow_count"] = user["follow_count"]
        return userItem
