import requests
import pymysql
import time
import urllib3
import random

# Silence the InsecureRequestWarning spam from unverified HTTPS requests.
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)

def get_raw_text(text):
    """Strip HTML tags from *text* and return the trimmed plain text.

    BUG FIX: the original tracked "inside text" with a flag initialised to 0,
    which silently dropped everything before the first '>' — e.g.
    "hello<br/>world" came back as "world". Text outside tags is now kept
    from the start of the string.
    """
    chars = []
    inside_tag = False
    for ch in text:
        if ch == "<":
            inside_tag = True
        elif ch == ">":
            # Closes a tag; like the original's .replace(">", ""), a stray
            # literal '>' is dropped from the output as well.
            inside_tag = False
        elif not inside_tag:
            chars.append(ch)
    return "".join(chars).strip()

# MySQL connection used by the crawl loop below for batched inserts.
# NOTE(review): host/user/password are hard-coded in source — these
# credentials are effectively public; move them to environment variables or
# a config file and rotate the password.
db = pymysql.connect(host='82.157.127.245',
                     user='root',
                     port=3306,
                     password='ydsungan0406',
                     database='weibo',
                     charset='utf8mb4')
cursor = db.cursor()
# Parameterized insert template consumed by cursor.executemany() in the
# main loop (one %s per column, so values are escaped by the driver).
sql = "insert into mblog(user_name, user_id, mblog_id, text, pic_urls, video_url, date) values (%s,%s,%s,%s,%s,%s,%s);"


# Browser-like headers sent with every Weibo API request.
# NOTE(review): the hard-coded cookie is what authenticates the crawl; it
# will expire (see ALF/SUB fields) — refresh it from a logged-in browser
# when responses start coming back with ok != 1.
request_headers = {
    "accept": "application/json, text/plain, */*",
    "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/95.0.4638.54 Safari/537.36",
    "accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
    "connection": "close",

    "cookie": "SINAGLOBAL=3408463932239.498.1635578620037; UOR=,,cn.bing.com; SCF=Au3USWtE9DqSnmw3ffXUwx6DEvgbEZBgqMOGTtu_hrZkh3GkMMkEobz02TjBKOy8pjIRBRxXgurYsL4QMxH43r0.; SUB=_2A25MhuaJDeRhGeNG71QU9S_NyTuIHXVv8l9BrDV8PUJbmtB-LW3mkW9NS0WHTGLxlKg4XkLfqGeEQDXQO4vomAwU; SUBP=0033WrSXqPxfM725Ws9jqgMF55529P9D9WWMpsjhFWGvDNhUl8gSQh155JpX5K-hUgL.Fo-RShqfSK2peoM2dJLoIpeLxKML1--LB-80IsHV9c8790e0; ALF=1667484249; SSOLoginState=1635948250; XSRF-TOKEN=kVmmssLpLktXN54esOheW6g_; _s_tentry=weibo.com; Apache=5515545686779.839.1635948258801; ULV=1635948258844:13:9:11:5515545686779.839.1635948258801:1635934636136; WBPSESS=Dt2hbAUaXfkVprjyrAZT_O7srNv1hiaxzlKI2VfifSBrOS_Y6N9Waix4b-nj-gG_A8aSCX62-q73R6VDEUSj_GzGalsItrGOKHROTxKudWBg_XqlgZJa1rPXEQSpaOxkMY5w4ZAtBx22Le5FCA9DNJ1n2iGEWZB7vr5nqEM4gPA9iMbGspjWj06U4D3UnXzxcqfXqGIM6pIWBp4Mebwd9g== "
}

# Mobile API that lists a user's timeline ("mblog" cards) page by page.
mymblog_url = "https://m.weibo.cn/api/container/getIndex"
# API that returns the full body of a truncated ("long text") post.
longtext_url = "https://weibo.com/ajax/statuses/longtext"




def get_userid(uid):
    """Return the ids of every account that *uid* follows.

    Pages through the Weibo friendships endpoint until an empty page,
    an error payload, or a non-200 response. Returns a (possibly empty)
    list of user ids.
    """
    friends_ids = []
    get_friends_url = "https://weibo.com/ajax/friendships/friends"
    # One session for all pages instead of building a new one per request,
    # so the underlying connection pool is actually reused.
    session = requests.session()
    page = 1
    while True:
        params = {
            "page": page,
            "uid": uid
        }
        get_friends_resp = session.get(get_friends_url, headers=request_headers, params=params)
        if get_friends_resp.status_code == 200:
            get_friends_resp = get_friends_resp.json()
            if get_friends_resp.get("ok") == 1:
                users = get_friends_resp.get("users")
                # `not users` also covers a missing "users" key (None),
                # which would have crashed len() in the original.
                if not users:
                    print("{}的关注朋友获取完毕，停止".format(uid))
                    break
                for user in users:
                    friends_ids.append(user.get("id"))
            else:
                print("您无法查看该用户的关注列表，请更换一个用户id")
                break
        else:
            print("获取{}的关注列表失败,停止爬取".format(uid))
            # BUG FIX: the message says "stop crawling" but the original had
            # no break here, so a failing endpoint was hammered forever.
            break
        page += 1
    return friends_ids

def get_follower(uid = "1795078941"):
    """Return the ids of *uid*'s followers (fans).

    Same paging contract as get_userid(): stops on an empty page, an error
    payload, or a non-200 response, and returns a (possibly empty) list.
    """
    follower_ids = []
    get_follower_url = "https://weibo.com/ajax/friendships/friends"
    # Reuse one session for every page (consistent with get_userid).
    session = requests.session()
    page = 1
    while True:
        params = {
            "relate": "fans",
            "page": page,
            "uid": uid,
            "type": "fans",
            "newFollowerCount": 0,
        }
        get_friends_resp = session.get(get_follower_url, headers=request_headers, params=params)
        if get_friends_resp.status_code == 200:
            get_friends_resp = get_friends_resp.json()
            if get_friends_resp.get("ok") == 1:
                users = get_friends_resp.get("users")
                # `not users` also covers a missing "users" key (None),
                # which would have crashed len() in the original.
                if not users:
                    print("{}的粉丝获取完毕，停止".format(uid))
                    break
                for user in users:
                    follower_ids.append(user.get("id"))
            else:
                print("您无法查看该用户的粉丝列表，请更换一个用户id")
                break
        else:
            print("获取{}的粉丝列表失败,停止爬取".format(uid))
            # BUG FIX: the original lacked this break, so a failing endpoint
            # was retried forever despite the "stop" message.
            break
        page += 1
    return follower_ids




#111.199.70.229 X
#36.112.188.131

#continue_uids = ["1912728955", "1582138964", "5456865382", "2172061270","1679704482","1709405635","1850145517","1094212551","2810373291","1765891182","1863847262","7586822738"]

uid_list = []
continue_uids = []

# Rows buffered for batched inserts; flushed to MySQL every 10 posts.
mblogs = []
# One shared HTTP session for the whole crawl (connection reuse) instead of
# constructing a throwaway requests.session() for every single request.
http = requests.session()

while True:

    # Seeding: when the current uid list is exhausted, pick a random follower
    # of the seed account and crawl the accounts that user follows.
    if len(uid_list) == 0:
        if len(continue_uids) == 0:
            continue_uids = get_follower()
        if len(continue_uids) == 0:
            # BUG FIX: random.randint(0, -1) raises ValueError on an empty
            # list; stop cleanly when no follower ids could be fetched.
            break
        rand_index = random.randint(0, len(continue_uids) - 1)
        original_uid = continue_uids[rand_index]
        uid_list = get_userid(original_uid)
        continue

    # Remember one random uid now so the frontier can be expanded from it
    # after this uid_list has been fully crawled (see bottom of the loop).
    rand_index = random.randint(0, len(uid_list) - 1)
    original_uid = uid_list[rand_index]
    for uid in uid_list:
        page = 1
        run_over = 1  # consecutive empty/no-data responses for this uid
        while True:
            # Page 1 must omit the "page" parameter; later pages carry it.
            if page == 1:
                params = {
                    "value": uid,
                    "containerid": "107603" + str(uid),
                    "type": "uid",
                    "jumpfrom": "weibocom",
                }
            else:
                params = {
                    "page": page,
                    "value": uid,
                    "containerid": "107603" + str(uid),
                    "type": "uid",
                    "jumpfrom": "weibocom",
                }
            status = -1
            mymblog_resp = http.get(mymblog_url, headers=request_headers, params=params)

            if mymblog_resp.status_code == 414:
                print("请求URI太长,停止爬虫")
                break
            elif mymblog_resp.status_code == 200:
                mymblog_resp = mymblog_resp.json()
                status = mymblog_resp.get("ok")
            # status stays -1 for any response other than 200/414: treat it
            # as an expired cookie / blocked request and move to the next uid.
            if status < 0:
                print("cookies失效")
                break

            if mymblog_resp.get("data") is None:
                # BUG FIX: the original `continue`d here unconditionally,
                # re-requesting the same page forever; bound the retries the
                # same way the empty-cards case below does.
                if run_over > 5:
                    break
                run_over += 1
                continue

            cards = mymblog_resp.get("data").get("cards")

            # An empty (or missing) card list usually means the end of the
            # timeline, but can be a transient miss — retry up to 5 times.
            if not cards:
                print("{}的微博爬取完毕{}次".format(uid, run_over))
                if run_over > 5:
                    print("结束爬取{}的微博".format(uid))
                    break
                else:
                    run_over += 1
                    continue
            run_over = 1

            for card in cards:
                mblog = card.get("mblog")
                if mblog is None:
                    continue
                mblogid = mblog.get("bid")
                # BUG FIX: a post without a "user" object crashed with
                # AttributeError on .get("screen_name"); default to {}.
                user_screen_name = (mblog.get("user") or {}).get("screen_name")
                isLongText = mblog.get("isLongText")
                text = mblog.get("text")
                text_raw = get_raw_text(text)
                if len(text_raw) == 0:
                    continue
                # Truncated posts need a second request for the full body;
                # on any failure fall back to the truncated text (best effort).
                if isLongText:
                    longtext_param = {"id": mblogid}
                    try:
                        longtext_resp = http.get(longtext_url, headers=request_headers, params=longtext_param)
                        longtext_resp = longtext_resp.json()
                        if longtext_resp.get("ok") == 1 and longtext_resp.get("data").get("longTextContent") is not None:
                            text_raw = longtext_resp.get("data").get("longTextContent")
                    except Exception:
                        print("获取长文本失败")

                pic_urls = ""
                # BUG FIX: "pic_num" may be absent, and `None > 0` raises
                # TypeError on Python 3 — default the missing value to 0.
                if (mblog.get("pic_num") or 0) > 0 and mblog.get("pics") is not None:
                    pic_infos = mblog.get("pics")
                    pic_urls = ",".join(pic.get("url") for pic in pic_infos)

                video_url = ""
                page_info = mblog.get("page_info")
                if page_info is not None and page_info.get("type") == "video":
                    video_urls = page_info.get("urls")
                    # "urls" is sometimes a list rather than a dict; only the
                    # dict form carries the mp4_hd_mp4 entry we store.
                    if video_urls is not None and not isinstance(video_urls, list):
                        if video_urls.get("mp4_hd_mp4") is not None:
                            video_url = video_urls.get("mp4_hd_mp4")

                created_at = mblog.get("created_at")

                mblogs.append((user_screen_name, uid, mblogid, text_raw, pic_urls, video_url, created_at))
                # Flush the buffer in batches of 10 rows (>= guards against
                # any path that pushes the buffer past the threshold).
                if len(mblogs) >= 10:
                    cursor.executemany(sql, mblogs)
                    db.commit()
                    mblogs = []

            # Throttle: long pause every 100 pages, short one every 5.
            if page % 100 == 0:
                print("已经爬取{}页了，休息30秒吧！".format(page))
                time.sleep(29)
            elif page % 5 == 0:
                print("已经爬取{}页了，休息1秒.".format(page))
                time.sleep(1)
            else:
                print("已经爬取{}页.".format(page))
            page += 1
        print("{}的微博爬取完成".format(uid))

    # uid_list fully crawled: expand the frontier from the remembered uid.
    uid_list = get_userid(original_uid)
    # NOTE: cursor/db are intentionally never closed here — the loop above
    # has no normal exit, so there is no reachable cleanup point.