import requests
from jsonpath import jsonpath
import re
import time
import random
import pymysql


class WeiBoSpider(object):
    """Crawler for first- and second-level comments under a single Weibo post.

    First-level comments are fetched page by page from the ``hotflow``
    endpoint; for each comment its replies (second-level comments) are fetched
    from ``hotFlowChild``, concatenated into ``self.two_text``, and the pair is
    written to the ``wbpl`` MySQL table.
    """

    def __init__(self):
        # First-level comment endpoint and its query parameters.
        self.one_url = 'https://m.weibo.cn/comments/hotflow'
        self.one_data = {
            'id': '4813628149072458',
            'mid': '4813628149072458',
            'max_id': None,  # the first page carries no max_id, so start empty
            'max_id_type': '0'
        }
        # Second-level (reply) endpoint; 'cid' is overwritten per comment.
        self.two_url = 'https://m.weibo.cn/comments/hotFlowChild'
        self.two_data = {
            'cid': '4813628329693567',
            'max_id': '0',
            'max_id_type': '0'
        }
        # NOTE(review): the Cookie is session-bound and will expire; refresh it
        # from a logged-in m.weibo.cn session before running.
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.0.0 Mobile Safari/537.36',
            'Referer': 'https://m.weibo.cn/detail/4813628149072458?sudaref=login.sina.com.cn',
            'Cookie': '__bid_n=18779b793705657ebe4207; FPTOKEN=3f8e0lMMUdjY/qF0X9LJbVN1huVdb8Eyuiat15YW5sQcSyBe0HPQpKDj2VD+ny1i+mETnj8YBiSK3UJZPLwZ3i35F42pvaaM5q5H2KV5VZTYVOJdZKTD3doPYIImk/sk3w1VXDNXG24spL7dwNorhkqriOPIpqS/dlrtKyeuSgh4tKega/wlaD/8OPvruY9dUBrwH3c9ZSuVn/e9La0BqgZNRg0r6a6ktSQy1eHovOkpBmDM5+FWZpT2v3PWDgcfP0kBipPFrqV+hsPox2fRsIbJMMkxBm/mRgu6myikswfsWdBKpQNTF4arG+a72BXvEA3yBFlfwzptxGymhNJvEu+As+F5tXndih0fpRwqZ5I4mlV8FaboJc9aApq0/DgyVgJDnBajT/VK2dvOu9bt7A==|msny62w+VGSo+HkiMXm12AKoHJe99kJan3WKxX8GIzs=|10|6bb56265e23e5df1cf7be313d3c2218b; _T_WM=34094697606; WEIBOCN_FROM=1110006030; XSRF-TOKEN=de6560; SCF=AhucQKsZDPwkoBsO7r9b4whb_L8uwRN75Cqw_xYgK5WGhfNj3vx48l4PleIk5854CXAq-fjLaKPWX2OLjeZKH-I.; SUB=_2A25Ji5igDeRhGeFG6FEQ9ynOzz6IHXVrdzjorDV6PUJbktAGLVjdkW1NedzZMos2qzJsbVtJRBjpGSbrgNq3BKj9; SUBP=0033WrSXqPxfM725Ws9jqgMF55529P9D9W50ZpV8QRGbaof--s1NXFBZ5JpX5K-hUgL.FoMRe0epS0MEShz2dJLoIp7LxKML1KBLBKnLxKqL1hnLBoMpe0MXSKn41KM0; SSOLoginState=1687152880; ALF=1689744880; MLOGIN=1; M_WEIBOCN_PARAMS=oid%3D4813628149072458%26luicode%3D20000061%26lfid%3D4813628149072458%26uicode%3D20000061%26fid%3D4813628149072458; mweibo_short_token=b270e497d2'
        }
        self.conn = pymysql.connect(
            user='root',
            password='123456',
            host='127.0.0.1',
            port=3306,
            database='spider6',
            charset='utf8mb4'
        )
        self.cs = self.conn.cursor()
        # Accumulator for the replies of the comment currently being processed.
        self.two_text = ''

    def get_one_data(self):
        """Fetch every page of first-level comments and persist each one.

        Bug fixes vs. the original: pagination is iterative (the original
        recursed with no `max_id == 0` stop condition, so it never terminated
        and would eventually hit RecursionError), comments without replies are
        still saved, and ``self.two_text`` is reset even when fetching replies
        fails, so stale replies can no longer leak into the next row.
        """
        while True:
            response = requests.get(self.one_url, headers=self.headers, params=self.one_data)
            json_data = response.json()
            # jsonpath returns False (not []) when nothing matches — normalize.
            one_comments = jsonpath(json_data, '$..data.*.text') or []
            one_authors = jsonpath(json_data, '$..data.*.user.screen_name') or []
            # rootid of a first-level comment is the 'cid' of its reply thread.
            root_ids = jsonpath(json_data, '$..data.*.rootid') or []
            for one_comment, one_author, root_id in zip(one_comments, one_authors, root_ids):
                # Strip embedded HTML tags (emoji <img>, links) from the text.
                comment = re.sub('<.*?>', '', one_comment)
                print(one_author, '--------->', comment)
                print('---------------')
                # Point the reply endpoint at this comment's thread and
                # restart its paging cursor from the first page.
                self.two_data['cid'] = root_id
                self.two_data['max_id'] = '0'
                try:
                    self.get_two_data()  # collect replies into self.two_text
                except Exception:
                    # Best effort: a failed reply fetch must not lose the
                    # first-level comment itself.
                    print('此评论下没有二级评论')
                self.save_data(comment)
                # Always reset the accumulator for the next comment.
                self.two_text = ''
            # First-level pagination: max_id == 0 (or missing) means last page.
            matches = jsonpath(json_data, '$..max_id')
            max_id = matches[0] if matches else 0
            if not max_id:
                break
            self.one_data['max_id'] = max_id
            # Politeness delay between pages (enable if the API rate-limits).
            # time.sleep(random.randint(3, 7))

    def get_two_data(self):
        """Fetch all reply pages for the 'cid' currently in ``self.two_data``,
        appending each cleaned reply to ``self.two_text`` as ``(text)``."""
        while True:
            response = requests.get(self.two_url, headers=self.headers, params=self.two_data)
            json_data = response.json()
            # jsonpath returns False when there are no replies on this page.
            two_comments = jsonpath(json_data, '$..data.*.text') or []
            two_authors = jsonpath(json_data, '$..data.*.user.screen_name') or []
            for two_author, two_comment in zip(two_authors, two_comments):
                t_comment = re.sub('<.*?>', '', two_comment)
                print('    ', two_author, '==========>', t_comment)
                # Accumulate replies; save_data writes them with the parent.
                self.two_text += "(" + t_comment + ")"
            # Reply pagination: max_id == 0 (or missing) means last page.
            matches = jsonpath(json_data, '$.max_id')
            t_max_id = matches[0] if matches else 0
            self.two_data['max_id'] = t_max_id
            if t_max_id == 0:
                print('二级翻页结束', t_max_id)
                break
            # Politeness delay between pages (enable if the API rate-limits).
            # time.sleep(random.randint(3, 7))

    def save_data(self, comment):
        """Insert one (first-level comment, concatenated replies) row.

        Uses a parameterized query, so comment text cannot inject SQL.
        """
        sql = 'insert into wbpl(one_comment, two_comment) values (%s, %s)'
        self.cs.execute(sql, [comment, self.two_text])
        self.conn.commit()

    def run(self):
        """Entry point: crawl everything, then release DB resources."""
        try:
            self.get_one_data()
        finally:
            # Close cursor/connection even if crawling aborts (the original
            # leaked the connection on any unhandled exception).
            self.cs.close()
            self.conn.close()



# Script entry point: build the spider and start the crawl.
if __name__ == '__main__':
    WeiBoSpider().run()




















