import re
import time

import pymysql
import requests
import urllib3

import log_helper

urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)

# Matches one HTML/XML tag, e.g. "<a href='...'>" or "</span>".
_TAG_RE = re.compile(r"<[^>]*>")


def get_raw_text(text):
    """Return *text* with all HTML tags stripped.

    Weibo's ``text`` field is an HTML fragment ("... <a href=...>#tag#</a> ...");
    this keeps only the human-readable content.

    BUG FIX: the original flag-based character scanner only kept characters
    that appeared *after* a ``>``, so any plain text before the first tag
    (the usual start of a post) was silently dropped. Stripping tags with a
    regex keeps all non-tag text.

    :param text: HTML fragment (str).
    :return: the fragment with every ``<...>`` tag removed.
    """
    return _TAG_RE.sub("", text)

# --- MySQL connection (module-level side effect: connects when the script starts).
# NOTE(review): host/user/password are hard-coded in source — move them to
# environment variables or a config file before sharing this code.
db = pymysql.connect(host='82.157.127.245',
                     user='root',
                     port=3306,
                     password='ydsungan0406',
                     database='weibo',
                     charset='utf8mb4')
cursor = db.cursor()
# Parameterized INSERT for one post row; executed in batches via executemany()
# in the crawl loop below.
sql = "insert into mblog(user_name, user_id, mblog_id, text, pic_urls, video_url, date) values (%s,%s,%s,%s,%s,%s,%s);"


# Headers sent with every API request. The cookie is a logged-in browser
# session copied by hand; it expires periodically, after which the API stops
# returning usable responses and the loop below aborts with "cookies失效".
# NOTE(review): refresh the cookie value before each run — TODO confirm how
# long a session stays valid.
request_headers = {
    "accept": "application/json, text/plain, */*",
    "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/95.0.4638.54 Safari/537.36",
    "accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
    "connection": "close",
    "cookie": "SINAGLOBAL=5188697282285.41.1591635317397; UOR=,,login.sina.com.cn; SCF=Ai2Z0Av6-LNog9gdRY2nsteOJC6e2CE2SpTKVU8Z4qEG1RbK1YMWNszlf5UzZYpDsRuHhap8mAjTu7eo999WSB8.; XSRF-TOKEN=WSEEe29kOZxhK6sa2xBDZfZp; _s_tentry=weibo.com; Apache=227778119879.2983.1635504056800; ULV=1635504056879:16:16:5:227778119879.2983.1635504056800:1635408640967; wb_view_log_6373877161=1280*7202; webim_unReadCount=%7B%22time%22%3A1635560922186%2C%22dm_pub_total%22%3A0%2C%22chat_group_client%22%3A0%2C%22chat_group_notice%22%3A0%2C%22allcountNum%22%3A20%2C%22msgbox%22%3A0%7D; SUB=_2A25MeN5bDeRhGeFJ6loW9i3FwjSIHXVvDEiTrDV8PUNbmtB-LWfbkW9NfJdnsEMpNDo3j9SlGkDosLueZROmMUwK; SUBP=0033WrSXqPxfM725Ws9jqgMF55529P9D9WFJM0.6mFCQK.KCWqS8Qsk45JpX5KzhUgL.FoMNeKnNSoe41Kn2dJLoI0YLxKqL1KnL12-LxK-LBKBLBK.LxKqL1KMLBK.LxKnLBo-LBoMLxK-L12-LB.zLxK-LB-BLBKqLxKBLBo.LBK5t; ALF=1667096971; SSOLoginState=1635560971; WBPSESS=Dt2hbAUaXfkVprjyrAZT_BFYpHBTIYcaFBnvxXJxXZ_z7tZ5vi1wjSCGKEd9RC-KJqOC0wXeYwOEX2u3PqVshMRxPn1_PQ_SkeT2pyYxQAiYaHM1eXUbFATWiiwkxxI45QSTB5g3IsLmXCQI2cCwHaGoT0jxL6b9CNnBbkWXWiWq73RuBuCE5jyymCTi6rXvwJoQhGIUpg5STITGmO2gzg=="

}

# Mobile-site API endpoint that lists a user's posts page by page.
mymblog_url = "https://m.weibo.cn/api/container/getIndex"
# Desktop API endpoint that returns the full body of truncated ("long text") posts.
longtext_url = "https://weibo.com/ajax/statuses/longtext"

# Weibo user ids to crawl, in order.
uid_list = ["2125613987","2817621143"]
since_id = ""  # NOTE(review): never read anywhere in this file — dead variable?

# Author's scratch notes: candidate server/proxy IPs ("X" presumably marks an
# unusable one — TODO confirm with the author).
#111.199.70.229 X
#36.112.188.131

# --- Main crawl loop -------------------------------------------------------
# For each uid, walk the paginated feed until several consecutive empty pages
# are seen, inserting posts into MySQL in batches of 10.
session = requests.session()  # reuse one session (connection pool) for every request

for uid in uid_list:
    page = 1
    run_over = 1         # consecutive empty-page counter: the API sometimes
                         # returns an empty page mid-stream, so retry up to 5x
    no_data_retries = 0  # consecutive responses whose "data" field is missing
    while True:
        # "107603" + uid is the containerid of a user's weibo feed;
        # the first page is requested without an explicit "page" number.
        params = {
            "value": uid,
            "containerid": "107603" + uid,
            "type": "uid",
            "jumpfrom": "weibocom",
        }
        if page > 1:
            params["page"] = page

        status = -1
        mymblog_resp = session.get(mymblog_url, headers=request_headers, params=params)

        if mymblog_resp.status_code == 414:
            log_helper.error("请求URI太长,停止爬虫")
            break
        elif mymblog_resp.status_code == 200:
            mymblog_resp = mymblog_resp.json()
            # "ok" is the API-level status flag inside the JSON body.
            status = mymblog_resp.get("ok")
        # BUG FIX: "ok" may be absent → status is None; `None < 0` raises
        # TypeError on Python 3. Treat a missing status as failure too.
        if status is None or status < 0:
            log_helper.error("cookies失效")
            break

        if mymblog_resp.get("data") is None:
            # BUG FIX: the original `continue`d here unconditionally, spinning
            # in an infinite tight loop (no backoff, no page advance) whenever
            # "data" was missing. Back off briefly and give up after 5 tries.
            no_data_retries += 1
            if no_data_retries > 5:
                log_helper.error("结束爬取{}的微博".format(uid))
                break
            time.sleep(1)
            continue
        no_data_retries = 0

        cards = mymblog_resp.get("data").get("cards") or []

        if len(cards) == 0:
            log_helper.error("{}的微博爬取完毕{}次".format(uid, run_over))
            if run_over > 5:
                log_helper.error("结束爬取{}的微博".format(uid))
                break
            else:
                run_over += 1
                continue
        run_over = 1
        mblogs = []  # rows pending for the next batched INSERT

        for card in cards:
            mblog = card.get("mblog")
            if mblog is None:
                # BUG FIX: feed cards without an "mblog" payload (presumably
                # ads/recommendations) crashed the original with
                # AttributeError on `None.get(...)` — skip them.
                continue
            mblogid = mblog.get("bid")
            user_screen_name = (mblog.get("user") or {}).get("screen_name")
            isLongText = mblog.get("isLongText")
            text = mblog.get("text")
            text_raw = get_raw_text(text)

            # Truncated posts carry only a preview; fetch the full body.
            if isLongText:
                longtext_param = {"id": mblogid}
                try:
                    longtext_resp = session.get(longtext_url, headers=request_headers, params=longtext_param)
                    longtext_resp = longtext_resp.json()
                    if longtext_resp.get("ok") == 1 and longtext_resp.get("data").get("longTextContent") is not None:
                        text_raw = longtext_resp.get("data").get("longTextContent")
                except Exception:
                    # Best-effort: keep the truncated preview on any failure.
                    log_helper.error("获取长文本失败")

            # Comma-joined image URLs; empty string when the post has none.
            pic_urls = ""
            # BUG FIX: "pic_num" may be absent (None); `None > 0` raises TypeError.
            if (mblog.get("pic_num") or 0) > 0 and mblog.get("pics") is not None:
                pic_urls = ",".join(pic.get("url") for pic in mblog.get("pics"))

            # Keep the first linked URL only when it points at weibo video.
            video_url = ""
            url_structs = mblog.get("url_struct")
            if url_structs:
                long_url = url_structs[0].get("long_url")
                # BUG FIX: "long_url" may be absent; guard before .find().
                if long_url and long_url.find("video.weibo.com") != -1:
                    video_url = long_url
            created_at = mblog.get("created_at")

            mblogs.append((user_screen_name, uid, mblogid, text_raw, pic_urls, video_url, created_at))
            if len(mblogs) == 10:
                cursor.executemany(sql, mblogs)
                db.commit()
                mblogs = []
        # BUG FIX: the original only flushed full batches of 10, silently
        # dropping up to 9 trailing posts of every page.
        if mblogs:
            cursor.executemany(sql, mblogs)
            db.commit()

        # Throttle: long pause every 100 pages, short pause every 5.
        if page % 100 == 0:
            log_helper.info("已经爬取{}页了，休息30秒吧！".format(page))
            time.sleep(29)
        elif page % 5 == 0:
            log_helper.info("已经爬取{}页了，休息1秒.".format(page))
            time.sleep(1)
        else:
            log_helper.info("已经爬取{}页.".format(page))
        page += 1
    log_helper.info("{}的微博爬取完成".format(uid))

# Release DB resources once every uid has been crawled.
cursor.close()
db.close()