import requests
from fake_useragent import UserAgent
import time
import re
import pymysql



def parseMax_id(text):
    """Extract the pagination cursor ``max_id`` from a hotflow API response.

    ``text`` is the parsed JSON dict of the response; a ``KeyError`` is
    raised when the expected keys are absent.
    """
    payload = text["data"]
    return payload["max_id"]
# Silence urllib3 warnings emitted through requests (precautionary; no
# verify=False request is visible in this file).
requests.packages.urllib3.disable_warnings()
# NOTE(review): this module-level url is shadowed inside __main__ before any
# request is made — it looks like dead code; confirm before removing.
url = 'https://m.weibo.cn/comments/hotflow?id=4755530545630702&mid=4755530545630702&max_id_type=0'
# Generator of randomized browser User-Agent strings for the request headers.
ua = UserAgent()
# Clean one comment's HTML text before storing it.
def parseComment(text):
    """Return the plain-text part of a weibo comment.

    ``text`` is the raw ``text`` field from the API, which may embed HTML
    such as ``<span class="url-icon">...`` for emoji/images.  Everything
    from the first ``<span `` fragment onward is dropped, then all space
    characters are removed.

    Fix: the original called ``.group(1)`` directly on the search result,
    which raised ``AttributeError`` whenever the text contained ``<span``
    without a trailing space (e.g. ``"<span>"``) — the pattern requires
    ``"<span "``.  We now fall back to the full text when there is no match.
    """
    user_c = text
    if '<span' in text:
        match = re.search(r'(.*?)<span .*', text)
        if match is not None:  # guard: '<span' without a space does not match
            user_c = match.group(1)
    # Chinese comment text normally contains no meaningful spaces.
    return user_c.replace(' ', '')

def saveMysql(data):
    """Persist scraped comments into the MySQL table ``weibo.comment``.

    ``data`` is a dict with three parallel lists under the keys
    ``"authorName"``, ``"comment"`` and ``"LikeNum"``; row i of the table
    is built from element i of each list.

    Fix: the original interpolated untrusted comment text into the SQL
    string with ``%`` formatting (SQL injection and quoting bugs on any
    comment containing a quote); it also leaked the connection/cursor when
    an execute failed.  Rows are now bound as parameters and the
    connection is always closed.
    """
    connect = pymysql.connect(host="localhost", user="root", passwd="111111",
                              port=3306, db="weibo", charset="utf8")
    try:
        with connect.cursor() as cur:
            # %s placeholders are filled by the driver, never by Python
            # string formatting — comment text is untrusted input.
            sql = "insert into comment(`name`,`comment`,`like`) values (%s,%s,%s)"
            rows = list(zip(data["authorName"], data["comment"], data["LikeNum"]))
            cur.executemany(sql, rows)
        connect.commit()
    finally:
        connect.close()


if __name__ == '__main__':

    headers = {
        "Host": "m.weibo.cn",
        "Connection": "keep-alive",
        "Accept": "application/json, text/plain, */*",
        "MWeibo-Pwa": "1",
        "X-XSRF-TOKEN": "da898d",
        "X-Requested-With": "XMLHttpRequest",
        "User-Agent": ua.chrome,
        "Sec-Fetch-Site": "same-origin",
        "Sec-Fetch-Mode": "cors",
        "Sec-Fetch-Dest": "empty",
        "Referer": "https://m.weibo.cn/detail/4614060694573362",
        "Accept-Encoding": "gzip, deflate, br",
        "Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6",
        "Cookie": "ALF=1651912210; _T_WM=74612986980; WEIBOCN_FROM=1110006030; SCF=AhXAwMG6YiiPQHHxBBcs9ONLrt7RJFY5wvclT6CZkMofoOiroyo-U66lhiRInnXsWr6N6PRpaD9urNhF18tN5LM.; SUB=_2A25PStVADeRhGeFJ7lAS9S7NzTqIHXVstPsIrDV6PUJbktB-LUXNkW1Nf6XBQEVg1q7tr9S0yXCCUSHYFFeFlvfT; SUBP=0033WrSXqPxfM725Ws9jqgMF55529P9D9WWkvbw1nOumH9-nAT5SbLwu5JpX5KzhUgL.FoMNSKz0SK5pSoq2dJLoIp7LxKML1KBLBKnLxKqL1hnLBoMNS0-Ee0-7eKqc; SSOLoginState=1649321230; MLOGIN=1; XSRF-TOKEN=be2f0a; M_WEIBOCN_PARAMS=oid=4755530545630702&luicode=20000061&lfid=4755530545630702"
    }

    # Running total of comments fetched so far (the API returns ~19 per page).
    count = 0
    # Scraped results: three parallel lists consumed by saveMysql().
    resData = {
        "authorName": [],
        "LikeNum": [],
        "comment": []
    }
    max_id = 0      # pagination cursor returned by the previous page
    flag = 0        # max_id_type query parameter; 1 retries after a failure
    indexpage = 1   # 1-based page counter, for progress output only
    Nums = int(input("请输入你要爬取的条数(建议不超过1000个:)"))
    while True:
        # First page has no cursor; later pages pass max_id/max_id_type.
        if count == 0:
            url = 'https://m.weibo.cn/comments/hotflow?id=4755530545630702&mid=4755530545630702&max_id_type=0'
        else:
            url = 'https://m.weibo.cn/comments/hotflow?id=4755530545630702&mid=4755530545630702&max_id={max_id}&max_id_type={flag}'.format(max_id=max_id,flag = flag)
        # Throttle requests to avoid being rate-limited.
        time.sleep(3)
        try:
            print("第{page}页正在爬取:...,已经爬取{num}".format(page = indexpage,num=count))
            res = requests.get(url, headers = headers).json()
            flag = 0
        except Exception:
            # Request or JSON decode failed: skip this page's worth of
            # comments and retry with max_id_type=1.  (Bare ``except:`` in
            # the original also swallowed KeyboardInterrupt.)
            count = count + 19
            flag = 1
            continue
        # Pull the next-page cursor; an error payload lacks these keys.
        try:
            max_id = parseMax_id(res)
            flag = 0
        except (KeyError, TypeError):
            print(res)
            flag = 1
            continue
        # Collect author, text and like-count for every comment on the page.
        for item in res["data"]["data"]:
            resData["comment"].append(parseComment(item["text"]))
            resData["LikeNum"].append(item["like_count"])
            resData["authorName"].append(item["user"]["screen_name"])
        print("第{page}页爬取成功...".format(page = indexpage))
        count = count + 19
        indexpage = indexpage + 1
        if count > Nums:
            break
    # Persist everything that was collected.
    saveMysql(resData)