import requests
import pymysql
import time
from mylogger import logger
import urllib3
import random
# Silence urllib3's InsecureRequestWarning so it does not spam the log output.
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)

def get_raw_text(text):
    """Strip HTML/XML tags from *text* and return the visible text, trimmed.

    Walks the string once, skipping everything between ``<`` and ``>``.

    Fixes a defect in the previous version, which only started collecting
    characters after the first ``>``: plain text appearing before the first
    tag was lost, and a string containing no tags at all yielded "".

    Args:
        text: raw post markup, e.g. ``"hello<br/>world"``.

    Returns:
        The tag-free text with surrounding whitespace removed.
    """
    visible = []
    in_tag = False
    for ch in text:
        if ch == "<":
            in_tag = True        # entering a tag: stop collecting
        elif ch == ">":
            in_tag = False       # tag closed: resume collecting
        elif not in_tag:
            visible.append(ch)   # O(n) build; joined once below
    return "".join(visible).strip()

# MySQL connection used to persist crawled posts and the seen-uid set.
# NOTE(review): host and credentials are hard-coded in source — move them to
# environment variables / a config file before sharing or committing this file.
db = pymysql.connect(
                     host='82.157.127.245',
                     #host='127.0.0.1',
                     user='root',
                     port=3306,
                     password='ydsungan0406',
                     database='weibo',
                     charset='utf8mb4')
cursor = db.cursor()
# Insert template for one crawled post; executed in batches via executemany.
sql = "insert into mblog3(user_name, user_id, mblog_id, text, pic_urls, video_url, date) values (%s,%s,%s,%s,%s,%s,%s);"

# Membership test against the uid table, which acts as a persistent seen-set.
query_sql = "select count(id) from uid where id = %s;"

# Headers sent with every weibo request.
# NOTE(review): the cookie embeds a live login session (SUB / XSRF tokens) —
# it expires and is a credential; do not commit it to version control.
request_headers = {
    "accept": "application/json, text/plain, */*",
    "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/95.0.4638.54 Safari/537.36",
    "accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
    "connection": "close",

    "cookie": "SINAGLOBAL=5188697282285.41.1591635317397; UOR=,,login.sina.com.cn; SUBP=0033WrSXqPxfM725Ws9jqgMF55529P9D9WWMpsjhFWGvDNhUl8gSQh155JpX5KMhUgL.Fo-RShqfSK2peoM2dJLoIpeLxKML1--LB-80IsHV9c8790e0; ULV=1637840410467:28:12:2:9614082361043.055.1637840410334:1637810455479; ALF=1669522707; SSOLoginState=1637986709; SCF=Ai2Z0Av6-LNog9gdRY2nsteOJC6e2CE2SpTKVU8Z4qEG_3LOk6SJqCBFhxYvCfYY2b1wTx8eLkoYwVSvLPQq94c.; SUB=_2A25MpcHFDeRhGeNG71QU9S_NyTuIHXVv0rQNrDV8PUNbmtAKLVnekW9NS0WHTHyNecioxIyccJCkcj3xuprxrSRW; XSRF-TOKEN=a6QlJCPmb6v08JiW8N-H0ICe; WBPSESS=06OPD5P0XEw_t5OR5Kxbm8LBP7lWR2UE_2z2KVDNAnSr3_vSbYWbCL9QfruV8CGcK5PCypOFRjCStXI2qkr6kNMJaJidqNGa-jlaCW0P6FhZw-fo211oejgx-M4t3HxH9SyAMLU5AE-SAzI82C3JqQ=="

}

# m.weibo.cn container API: lists a user's posts page by page.
mymblog_url = "https://m.weibo.cn/api/container/getIndex"
# Endpoint returning the full body of a truncated ("long text") post.
longtext_url = "https://weibo.com/ajax/statuses/longtext"





def get_userid(uid):
    """Return the ids of every account that *uid* follows.

    Pages through weibo's "friends" endpoint until an empty page is
    returned, the list is not viewable, or connections fail more than
    5 times in a row. A non-200 page is logged and skipped.

    Fixes over the previous version: the advertised 5-second pause after a
    connection failure is now actually performed; the bare ``except:`` is
    narrowed to request errors; a single session is reused; every request
    gets a timeout so a dead connection cannot hang the crawler.

    Args:
        uid: weibo user id (str or int) whose followee list is fetched.

    Returns:
        list of followee ids as returned by the API.
    """
    friends_ids = []
    get_friends_url = "https://weibo.com/ajax/friendships/friends"
    session = requests.session()  # reuse one session for all pages
    page = 1
    max_tries = 1
    while True:
        params = {
            "page": page,
            "uid": uid
        }
        try:
            # timeout keeps a stalled connection from blocking forever
            get_friends_resp = session.get(get_friends_url, headers=request_headers,
                                           params=params, timeout=10)
        except requests.exceptions.RequestException:
            logger.error("Max retries exceeded with url: mymblog_url {}次".format(max_tries))
            logger.error("暂停5秒")
            if max_tries > 5:
                break
            max_tries += 1
            time.sleep(5)  # honor the 5-second pause the log announces
            continue
        max_tries = 1
        if get_friends_resp.status_code == 200:
            payload = get_friends_resp.json()
            if payload.get("ok") == 1:
                users = payload.get("users")
                # an empty (or missing) user list marks the end of pagination
                if not users:
                    logger.error("{}的关注朋友获取完毕，停止".format(uid))
                    break
                for user in users:
                    friends_ids.append(user.get("id"))
            else:
                logger.error("您无法查看该用户的关注列表，请更换一个用户id")
                break
        else:
            # non-200: skip just this page, as the log message states
            logger.error("获取{}的关注列表的第{}页失败,跳过该页".format(uid, page))
        page += 1
    return friends_ids

def get_follower(uid = "6369643676"):
    """Return the follower ids of *uid*, paging until the list runs out.

    Pages through weibo's fans endpoint until an empty page is returned,
    the list is not viewable, or connections fail more than 5 times in a
    row.

    Fixes over the previous version: the advertised 5-second pause after a
    connection failure is now actually performed; the bare ``except:`` is
    narrowed to request errors; requests get a timeout and share one
    session; a non-200 response now stops the loop as its log message
    ("停止爬取") states, instead of looping forever.

    Args:
        uid: weibo user id whose followers are fetched (default seed account).

    Returns:
        list of follower ids as returned by the API.

    Raises:
        RuntimeError: when the API answers ok == -100 (login cookie expired,
            a fresh QR-code login is required).
    """
    follower_ids = []
    get_follower_url = "https://weibo.com/ajax/friendships/friends"
    session = requests.session()  # reuse one session for all pages
    page = 1
    max_tries = 1
    while True:
        params = {
            "relate": "fans",
            "page": page,
            "uid": uid,
            "type": "fans",
            "newFollowerCount": 0,
        }
        try:
            # timeout keeps a stalled connection from blocking forever
            get_friends_resp = session.get(get_follower_url, headers=request_headers,
                                           params=params, timeout=10)
        except requests.exceptions.RequestException:
            logger.error("Max retries exceeded with url: mymblog_url {}次".format(max_tries))
            logger.error("暂停5秒")
            if max_tries > 5:
                break
            max_tries += 1
            time.sleep(5)  # honor the 5-second pause the log announces
            continue
        max_tries = 1
        if get_friends_resp.status_code == 200:
            payload = get_friends_resp.json()
            ok = payload.get("ok")
            if ok == 1:
                users = payload.get("users")
                # an empty (or missing) user list marks the end of pagination
                if not users:
                    logger.error("{}的粉丝获取完毕，停止".format(uid))
                    break
                for user in users:
                    follower_ids.append(user.get("id"))
            elif ok == -100:
                # session cookie rejected by the API: caller must re-login
                logger.error("请重新扫码登录网页微博...")
                raise RuntimeError
            else:
                logger.error("您无法查看该用户的粉丝列表，请更换一个用户id")
                break
        else:
            # non-200: stop crawling this list, matching the log message
            logger.error("获取{}的粉丝列表失败,停止爬取".format(uid))
            break
        page += 1
    return follower_ids




#111.199.70.229 X
#36.112.188.131

#continue_uids = ["1912728955", "1582138964", "5456865382", "2172061270","1679704482","1709405635","1850145517","1094212551","2810373291","1765891182","1863847262","7586822738"]

# uids excluded from crawling (manually blacklisted accounts).
remove_uids = ["2803301701", "2656274875", "6189120710", "1686546714", "1974576991", "1663072851"]

uid_list = []       # current working set of uids to crawl
continue_uids = []  # follower-id pool used to reseed uid_list

mblogs = []         # pending post rows, flushed to MySQL in batches of 10
# Main crawl loop — never terminates on its own.
while True:
    # Purge blacklisted uids from the current working set.
    if len(uid_list) != 0:
        for remove_uid in remove_uids:
            try:
                uid_list.remove(int(remove_uid))
            except:
                pass
    # Working set exhausted: reseed it from the followee list of a randomly
    # chosen follower of the default seed account.
    if len(uid_list) == 0:
        if len(continue_uids) == 0:
            continue_uids = get_follower()

        rand_index = random.randint(0, len(continue_uids) - 1)
        original_uid = continue_uids[rand_index]
        uid_list = get_userid(original_uid)
        continue

    # Pick a uid now; it is only used after the for-loop below finishes,
    # to refresh uid_list for the next round.
    rand_index = random.randint(0, len(uid_list)-1)
    original_uid = uid_list[rand_index]
    for uid in uid_list:

        # The uid table is a persistent seen-set: skip already-crawled users,
        # otherwise record this uid before crawling it.
        query_sql = "select count(id) from uid where id = %s;"
        insert_sql = "insert into uid(id) values (%s);"
        # NOTE(review): (uid) is a plain scalar, not a tuple — pymysql accepts
        # a scalar arg, but (uid,) would be unambiguous.
        cursor.execute(query_sql, (uid))
        query_res = cursor.fetchone()
        if query_res[0] > 0:
            continue
        else:
            cursor.execute(insert_sql, (uid))
            db.commit()

        page = 1
        run_over = 1      # consecutive empty-page responses (end-of-timeline probe)
        cookies_expt = 1  # consecutive cookie/permission failures
        max_tries = 1     # consecutive connection failures
        while True:
            # The container API's first page must be requested WITHOUT a
            # "page" parameter; subsequent pages include it.
            if page == 1:
                params = {
                    "value": uid,
                    "containerid": "107603" + str(uid),
                    "type": "uid",
                    "jumpfrom": "weibocom",
                }
            else:
                params = {
                    "page": page,
                    "value": uid,
                    "containerid": "107603" + str(uid),
                    "type": "uid",
                    "jumpfrom": "weibocom",
                }
            status = -1
            try:
                mymblog_resp = requests.session().get(mymblog_url, headers=request_headers, params=params)
            except:
                logger.error("Max retries exceeded with url: mymblog_url {}次".format(max_tries))
                # NOTE(review): the log announces a 5-second pause but no
                # time.sleep(5) is actually performed here.
                logger.error("暂停5秒")
                if max_tries > 5:
                    break
                else:
                    max_tries += 1
                    continue
            max_tries = 1
            # 414 (URI too long) is how this endpoint signals rate limiting.
            # NOTE(review): despite the log text, this break only stops the
            # current user's crawl, not the whole crawler.
            if mymblog_resp.status_code == 414:
                logger.error("请求URI太长, 接口被限制，停止爬虫")
                break
            elif mymblog_resp.status_code == 200:
                mymblog_resp = mymblog_resp.json()
                status = mymblog_resp.get("ok")
            # status stays -1 for any HTTP status other than 200/414, so such
            # errors are retried under this "expired cookies" branch as well.
            if status < 0:
                logger.error("cookies失效{}次".format(cookies_expt))
                if cookies_expt > 5:
                    break
                else:
                    cookies_expt += 1
                    continue
            cookies_expt = 1
            # ok == 1 but no payload: retry the same page.
            if mymblog_resp.get("data") is None:
                continue

            cards = mymblog_resp.get("data").get("cards")

            # Empty page: probe a few more times before declaring the
            # timeline finished (the API can return transient empty pages).
            if len(cards) == 0:
                logger.error("{}的微博爬取完毕{}次".format(uid, run_over))
                if run_over > 5:
                    logger.error("结束爬取{}的微博".format(uid))
                    break
                else:
                    run_over += 1
                    continue
            run_over = 1


            # Build one DB row per post card on this page.
            for card in cards:
                mblog = card.get("mblog")
                if mblog is None:
                    continue
                mblogid = mblog.get("bid")
                if mblog.get("user") is None:
                    continue
                user_screen_name = mblog.get("user").get("screen_name")
                isLongText = mblog.get("isLongText")
                text = mblog.get("text")
                text_raw = get_raw_text(text)
                if len(text_raw) == 0:
                    continue
                # Truncated post: fetch the full body from the longtext API;
                # on any failure keep the truncated text extracted above.
                if isLongText:
                    longtext_param = {"id": mblogid}
                    try:
                        longtext_resp = requests.session().get(longtext_url, headers=request_headers, params=longtext_param)
                        if longtext_resp.status_code == 414:
                            logger.error("请求长文本的URI太长, 接口被限制，请立即更换cookie或更换账号 ")
                        longtext_resp = longtext_resp.json()
                        if longtext_resp.get("ok") == 1 and longtext_resp.get("data").get("longTextContent") is not None:
                            text_raw = longtext_resp.get("data").get("longTextContent")
                    except Exception as e:
                        logger.error("获取长文本失败")
                # Comma-joined image URLs; empty string when the post has none.
                pic_urls = ""
                if mblog.get("pic_num") > 0 and mblog.get("pics") is not None:
                    pic_infos = mblog.get("pics")
                    pic_url_list = []
                    for pic in pic_infos:
                        pic_url_list.append(pic.get("url"))
                    pic_urls = ",".join(pic_url_list)

                video_url = ""
                if mblog.get("page_info") is not None:
                    if mblog.get("page_info").get("type") == "video":
                        video_urls = mblog.get("page_info").get("urls")
                        # The type check skips list-shaped "urls" payloads so
                        # .get() below is safe. NOTE(review):
                        # isinstance(video_urls, list) would be idiomatic.
                        if video_urls is not None and type(video_urls) != type([1]):
                            if video_urls.get("mp4_hd_mp4") is not None:
                                video_url = video_urls.get("mp4_hd_mp4")


                created_at = mblog.get("created_at")

                # Batch insert every 10 rows.
                # NOTE(review): a tail of fewer than 10 rows stays buffered
                # until a later batch fills — it is never explicitly flushed.
                mblogs.append((user_screen_name, uid, mblogid, text_raw, pic_urls, video_url, created_at))
                if (len(mblogs) >= 10):
                    cursor.executemany(sql, mblogs)
                    db.commit()
                    mblogs = []
            # Pacing: long pause every 100 pages, short pause every 5.
            if page % 100 == 0:
                logger.info("已经爬取{}页了，休息20秒吧！".format(page))
                time.sleep(19)
            elif page % 5 == 0:
                logger.info("已经爬取{}页了，休息1秒.".format(page))
                time.sleep(1)
            else:
                logger.info("已经爬取{}页.".format(page))
            page += 1
        logger.info("{}的微博爬取完成".format(uid))

    # Refresh the working set with the followees of the uid picked above.
    uid_list = get_userid(original_uid)
    # uid_list fully traversed
    #cursor.close()
    #db.close()