from json import JSONDecodeError

import scrapy
import json
from ..items import WeiBoArticleItem, UserItem, CommentItem
from urllib.parse import quote
import redis


class WeiBoSpider(scrapy.Spider):
    """Crawl m.weibo.cn search results for a topic.

    For each previously unseen post the spider yields a UserItem (author),
    a WeiBoArticleItem (the post), then follows the hot-comment API and
    yields a UserItem + CommentItem per comment. Redis sets ('weiboIDs',
    'commentIDs') de-duplicate posts/comments across pages and runs.
    """

    # Spider name used by `scrapy crawl WeiBoSpider`.
    name = 'WeiBoSpider'
    # Domains the spider is allowed to crawl.
    allowed_domains = ['weibo.cn']

    # Request headers: disguise as a regular desktop Chrome browser.
    header = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/95.0.4638.54 Safari/537.36"
    }
    # Search keyword; URL-encoded with quote() before being formatted in.
    theme = '元气森林'

    # Search-result pages; placeholders: encoded keyword, page number.
    url_format = 'https://m.weibo.cn/api/container/getIndex?containerid=100103type%3D1%26q%3D{}&page_type=searchall&page={}'
    # First page of hot comments; placeholders: post id, mid.
    comment_url_format = 'https://m.weibo.cn/comments/hotflow?id={}&mid={}&max_id_type=0'
    # Subsequent comment pages; placeholders: post id, mid, max_id cursor.
    comment_next_url_format = 'https://m.weibo.cn/comments/hotflow?id={}&mid={}&max_id={}&max_id_type=0'

    # Route this spider's items to a dedicated pipeline (300 = priority).
    custom_settings = {
        "ITEM_PIPELINES": {'SpiderDemo.pipelines.SpiderdemoPipeline': 300, },
    }

    def __init__(self, *args, **kwargs):
        # BUGFIX: forward to scrapy.Spider.__init__ so the base class can
        # finish its setup (name handling, logger, start_urls defaults).
        # The original skipped it and accepted no kwargs, which breaks
        # Scrapy's `cls(*args, **kwargs)` instantiation contract.
        super().__init__(*args, **kwargs)
        # Shared Redis connection pool backing the de-duplication sets.
        # NOTE(review): host 'master' assumes a matching /etc/hosts or DNS
        # entry — confirm in the deployment environment.
        self.redisConnPool = redis.ConnectionPool(host='master', port=6379, max_connections=10)

    def start_requests(self):
        """Manually issue the initial search requests, pages 1..79."""
        # Hoisted out of the loop: the encoded keyword never changes.
        theme_encode = quote(self.theme)
        for page in range(1, 80):
            url = self.url_format.format(theme_encode, page)
            # callback: parse; headers: browser disguise.
            yield scrapy.Request(url=url, callback=self.parse, headers=self.header)

    def parse(self, response):
        """Parse one search-result page.

        Yields UserItem + WeiBoArticleItem for each unseen post and
        schedules a hot-comment request for it.
        """
        redisConn = redis.Redis(connection_pool=self.redisConnPool)
        try:
            jsonObj = json.loads(response.text)
        except JSONDecodeError as e:
            # BUGFIX/consistency: comment_parse already guards against
            # non-JSON responses (rate limiting, login walls); parse
            # previously crashed on them.
            self.logger.error("search page is not valid JSON: %s", e)
            return
        data = jsonObj.get('data', None)
        if data is None:
            return
        cards = data.get('cards', None)
        if cards is None:
            return
        for card in cards:
            mblog = card.get('mblog', None)
            if mblog is None:
                continue
            mid = mblog['mid']  # post id
            # Skip posts already handled on an earlier page or run.
            if redisConn.sismember('weiboIDs', mid):
                continue
            redisConn.sadd('weiboIDs', mid)

            # Author data first, so the pipeline sees the user row.
            user = mblog['user']
            yield self.construct_user_item(user)

            weiboArticleItem = WeiBoArticleItem()
            weiboArticleItem['id'] = user['id']
            weiboArticleItem['screen_name'] = user['screen_name']
            weiboArticleItem['created_at'] = mblog['created_at']  # publish time
            weiboArticleItem['source'] = mblog['source']          # client device
            weiboArticleItem['text'] = mblog['text']              # post body
            weiboArticleItem['mid'] = mid
            weiboArticleItem['attitudes_count'] = mblog['attitudes_count']  # likes
            weiboArticleItem['comments_count'] = mblog['comments_count']    # comment count
            weiboArticleItem['reposts_count'] = mblog['reposts_count']      # reposts
            # Hand the item to the pipeline for final processing.
            yield weiboArticleItem

            # Crawl this post's hot comments; meta carries mid downstream
            # so comment items can reference their parent post.
            comment_url = self.comment_url_format.format(mid, mid)
            yield scrapy.Request(url=comment_url, headers=self.header,
                                 callback=self.comment_parse, meta={"mid": mid})

    def comment_parse(self, response):
        """Parse one page of hot comments and follow max_id pagination."""
        redisConn = redis.Redis(connection_pool=self.redisConnPool)
        mid = response.meta['mid']  # parent post id
        try:
            jsonObj = json.loads(response.text)
        except JSONDecodeError as e:
            # Non-JSON response (rate limit / login wall): log and stop
            # this branch, as the original best-effort behavior intended.
            self.logger.error("comment page is not valid JSON: %s", e)
            return
        data = jsonObj.get('data', None)
        if data is None:
            return
        comments = data.get('data', None)
        # Cursor for building the next comment-page URL.
        max_id = data.get('max_id', None)
        if comments is not None:
            for comment in comments:
                comment_id = comment['id']  # renamed: `id` shadowed the builtin
                # Skip comments already emitted in a previous run/page.
                if redisConn.sismember('commentIDs', comment_id):
                    continue
                redisConn.sadd('commentIDs', comment_id)

                user = comment['user']
                yield self.construct_user_item(user)

                commentItem = CommentItem()
                commentItem['mid'] = mid
                commentItem['user_id'] = user['id']
                commentItem['comment_id'] = comment_id
                commentItem['text'] = comment['text']
                commentItem['created_at'] = comment['created_at']
                # Send to the pipeline for final processing.
                yield commentItem
        # BUGFIX: the API marks the last page with max_id == 0. The old
        # `max_id is not None` check kept requesting the max_id=0 page at
        # the end of every thread (only Scrapy's dupefilter broke the
        # cycle). A truthiness test handles both 0 and a missing key.
        if max_id:
            comment_next_url = self.comment_next_url_format.format(mid, mid, max_id)
            yield scrapy.Request(url=comment_next_url, headers=self.header,
                                 callback=self.comment_parse, meta={"mid": mid})

    def construct_user_item(self, user):
        """Build a UserItem from a raw m.weibo.cn user dict.

        Expects the keys id, screen_name, followers_count, follow_count
        and gender to be present (KeyError otherwise, as before).
        """
        userItem = UserItem()
        userItem['id'] = user['id']                            # user id
        userItem['screen_name'] = user['screen_name']          # display name
        userItem['followers_count'] = user['followers_count']  # follower count
        userItem['follow_count'] = user['follow_count']        # following count
        userItem['gender'] = user['gender']                    # gender flag
        return userItem
