import os
import time

import requests
import urllib3

import pymysql

import log_helper

urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)

# MySQL connection used to persist crawled posts and crawled-uid bookkeeping.
# NOTE(security): connection settings fall back to the values that were
# previously hard-coded here; override them via WEIBO_DB_* environment
# variables instead of committing credentials to source control.
db = pymysql.connect(host=os.environ.get("WEIBO_DB_HOST", "82.157.127.245"),
                     user=os.environ.get("WEIBO_DB_USER", "root"),
                     port=int(os.environ.get("WEIBO_DB_PORT", "3306")),
                     password=os.environ.get("WEIBO_DB_PASSWORD", "ydsungan0406"),
                     database=os.environ.get("WEIBO_DB_NAME", "weibo"),
                     charset='utf8mb4')
cursor = db.cursor()
# Bulk-insert statement for posts; executed via cursor.executemany on batches.
sql = "insert into mblog(user_name, user_id, mblog_id, text, pic_urls, video_url, date) values (%s,%s,%s,%s,%s,%s,%s);"


# HTTP headers attached to every Weibo AJAX request.
# NOTE(security/maintenance): the "cookie" value is a hard-coded login
# session; it expires periodically and must be refreshed by hand, and it
# should not be committed to version control.
request_headers = {
    "accept": "application/json, text/plain, */*",
    "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/95.0.4638.54 Safari/537.36",
    "accept-language": "zh-CN,zh;q=0.9,en;q=0.8",
    "connection": "close",
    "cookie": "SINAGLOBAL=5188697282285.41.1591635317397; UOR=,,login.sina.com.cn; SUBP=0033WrSXqPxfM725Ws9jqgMF55529P9D9WWMpsjhFWGvDNhUl8gSQh155JpX5KMhUgL.Fo-RShqfSK2peoM2dJLoIpeLxKML1--LB-80IsHV9c8790e0; ALF=1667995205; SSOLoginState=1636459206; SCF=Ai2Z0Av6-LNog9gdRY2nsteOJC6e2CE2SpTKVU8Z4qEGiQ8pGEGMcN0PHGH1M77tTRE2RBntFBlLDCzNJCJCQ0E.; SUB=_2A25MjhKXDeRhGeNG71QU9S_NyTuIHXVv-gNfrDV8PUNbmtB-LXbBkW9NS0WHTCfACgMo2hr5IgQSSLmN6NIk8g7n; XSRF-TOKEN=F-0HgyQ_oblBWJuosG1mCGrk; _s_tentry=weibo.com; Apache=668921531491.4688.1636459251105; ULV=1636459251317:23:7:4:668921531491.4688.1636459251105:1636386472262; WBPSESS=Dt2hbAUaXfkVprjyrAZT_O7srNv1hiaxzlKI2VfifSBrOS_Y6N9Waix4b-nj-gG_A8aSCX62-q73R6VDEUSj_GzGalsItrGOKHROTxKudWBg_XqlgZJa1rPXEQSpaOxk_hIE5OWC0yWEi6v4OhPXNKKrGYJpsNzdnY34T8-bF5FOSJSGBzjZMKLGT25terZGkScjWSFt3_5QZCw8h8_bMg=="


}

# Weibo AJAX endpoints: a user's post timeline, and the full body of posts
# whose text is truncated ("long text") in the timeline payload.
mymblog_url = "https://weibo.com/ajax/statuses/mymblog"
longtext_url = "https://weibo.com/ajax/statuses/longtext"

# Weibo user ids to crawl.
uid_list = ["1496814565"]

# since_id: pagination cursor sent with mymblog requests (empty on page 1).
since_id = ""
# mblogs: buffer of post rows awaiting a batched insert.
mblogs = []
# -- Main crawl loop ---------------------------------------------------------
# For each uid: skip it if already recorded in the `uid` table, otherwise page
# through /ajax/statuses/mymblog, resolve long texts, collect picture/video
# URLs, and insert the posts into MySQL in batches.

BATCH_SIZE = 10    # posts buffered before a bulk insert
MAX_RETRIES = 5    # consecutive failures tolerated before giving up

# Reuse one session for every request (connection pooling) instead of
# creating a fresh requests.session() per call.
session = requests.session()


def _fetch_long_text(mblogid, fallback):
    """Fetch the full text of a truncated post.

    Returns *fallback* unchanged when the request fails or the payload
    carries no longTextContent.
    """
    try:
        resp = session.get(longtext_url, headers=request_headers,
                           params={"id": mblogid}, timeout=30).json()
        if resp.get("ok") == 1:
            long_text = (resp.get("data") or {}).get("longTextContent")
            if long_text is not None:
                return long_text
    except Exception:
        log_helper.error("获取长文本失败")
    return fallback


def _collect_pic_urls(mblog):
    """Return the post's original-size picture URLs joined by commas ("" if none)."""
    # pic_num may be absent/None; the original `pic_num > 0` raised TypeError.
    if not mblog.get("pic_num") or not mblog.get("pic_ids"):
        return ""
    pic_infos = mblog.get("pic_infos") or {}
    urls = []
    for pic_id in mblog.get("pic_ids"):
        info = pic_infos.get(pic_id)
        if info is not None:
            urls.append(info.get("original").get("url"))
    return ",".join(urls)


def _extract_video_url(mblog):
    """Return the first video.weibo.com link attached to the post, or ""."""
    url_struct = mblog.get("url_struct")
    if not url_struct:
        return ""
    long_url = url_struct[0].get("long_url")
    # long_url may be missing; the original crashed on None.find(...).
    if long_url and long_url.find("video.weibo.com") != -1:
        return long_url
    return ""


for uid in uid_list:
    page = 1
    since_id = ""    # pagination cursor, advanced from each response
    query_sql = "select count(id) from uid where id = %s;"
    insert_sql = "insert into uid(id) values (%s);"
    # Skip users crawled before; otherwise record this uid as seen.
    # (Params are passed as a real 1-tuple; `(uid)` was just a string.)
    cursor.execute(query_sql, (uid,))
    query_res = cursor.fetchone()
    if query_res[0] > 0:
        continue
    cursor.execute(insert_sql, (uid,))
    db.commit()

    run_over = 1        # consecutive empty pages seen
    cookies_expt = 1    # consecutive failed / not-ok responses seen
    while True:
        params = {"page": page, "uid": uid, "feature": 0}
        if page != 1:
            params["since_id"] = since_id

        status = -1
        resp_body = None
        try:
            mymblog_resp = session.get(mymblog_url, headers=request_headers,
                                       params=params, timeout=30)
        except Exception:
            log_helper.error("爬取失败!!!:Max retries !")
            # Retry the same page (bounded) instead of silently skipping it.
            if cookies_expt > MAX_RETRIES:
                break
            cookies_expt += 1
            continue
        if mymblog_resp.status_code == 414:
            log_helper.error("请求URI太长,停止爬虫")
            break
        if mymblog_resp.status_code == 200:
            resp_body = mymblog_resp.json()
            status = resp_body.get("ok")
        if status != 1:
            # "ok" != 1 usually means expired cookies. The original test
            # (status < 0) could never match an ok == 0 payload.
            log_helper.error("cookies失效{}次".format(cookies_expt))
            if cookies_expt > MAX_RETRIES:
                break
            cookies_expt += 1
            continue
        cookies_expt = 1

        data = resp_body.get("data") or {}
        # Advance the pagination cursor — the original never updated since_id,
        # so every page after the first was requested with since_id="".
        since_id = data.get("since_id", since_id)
        # A missing "data"/"list" is treated like an empty page so it feeds the
        # bounded run_over counter instead of looping forever.
        mblog_list = data.get("list") or []
        if not mblog_list:
            log_helper.error("{}的微博爬取完毕{}次".format(uid, run_over))
            if run_over > MAX_RETRIES:
                log_helper.error("结束爬取{}的微博".format(uid))
                break
            run_over += 1
            continue
        run_over = 1

        for mblog in mblog_list:
            mblogid = mblog.get("mblogid")
            user_screen_name = (mblog.get("user") or {}).get("screen_name")
            text_raw = mblog.get("text_raw")
            if mblog.get("isLongText"):
                text_raw = _fetch_long_text(mblogid, text_raw)

            mblogs.append((user_screen_name, uid, mblogid, text_raw,
                           _collect_pic_urls(mblog), _extract_video_url(mblog),
                           mblog.get("created_at")))
            if len(mblogs) >= BATCH_SIZE:
                cursor.executemany(sql, mblogs)
                db.commit()
                mblogs = []

        # Throttle: long pause every 100 pages, short pause every 5.
        if page % 100 == 0:
            log_helper.info("已经爬取{}页了，休息30秒吧！".format(page))
            time.sleep(29)
        elif page % 5 == 0:
            log_helper.info("已经爬取{}页了，休息1秒.".format(page))
            time.sleep(1)
        else:
            log_helper.info("已经爬取{}页.".format(page))
        page += 1

    # Flush posts still buffered for this uid (fewer than BATCH_SIZE) — the
    # original dropped any trailing partial batch.
    if mblogs:
        cursor.executemany(sql, mblogs)
        db.commit()
        mblogs = []
    log_helper.info("{}的微博爬取完成".format(uid))

# Flush any posts still buffered (fewer than a full batch of 10) — without
# this, a trailing partial batch was silently lost — then release DB handles.
if mblogs:
    cursor.executemany(sql, mblogs)
    db.commit()
    mblogs = []
cursor.close()
db.close()