# weibo-crawler/run.py
import logging
import sys

# Run the crawler directly, without going through the command line
from weibo_webdriver import WeiboWebDriverCrawler

if __name__ == '__main__':
    # Configure the root logger explicitly. Without basicConfig, Python's
    # last-resort handler only emits WARNING and above, so the
    # logging.info() call below would be silently dropped.
    logging.basicConfig(
        level=logging.INFO,
        format='%(asctime)s - %(levelname)s - %(message)s',
    )

    # Crawler configuration.
    # NOTE(security): the session cookie below is a live credential
    # hard-coded in source — prefer loading it from an environment
    # variable or an untracked config file.
    config = {
        "user_id": "6819693315",  # Wang
        # "user_id": "5787197886",  # Weiming
        # "user_id": "6171306584",  # Bazi
        "sqlite": 'data/weibo.db',  # SQLite database path for crawled data
        "start_page": 1,            # first page to crawl
        "max_pages": None,          # None means crawl all available pages
        "cookie": "SUBP=0033WrSXqPxfM725Ws9jqgMF55529P9D9WFVceO6z43BbvamNp5sASPg5JpX5KMhUgL.FoqfSKMXSh2fSKB2dJLoIX.LxKBLB.zLB.zLxK-L12qLBoqLxKBLBonL1h.LxKqLBozL1K5LxK.LBKeL12Hki--Ri-2pi-2fi--Ni-88iK.Ni--fi-82iK.7; _T_WM=3ac61aa7b85d4fbc4a83ad6f47cc109b; SCF=Aj6Om1OXZlokGRLDEsWz_kKLqPnVFYUWhR_s8FVpaJtGYcSsbsj-wBglH7ejIvqIvZo0mssX5nKzKsDIx63RXhI.; SUB=_2A25FLT3VDeRhGeBL7lUV9C_JzjiIHXVmQz8drDV6PUJbktAYLVStkW1NRu7oEiQROmKRpxB9CtsVsCszN0ceMFQ7; ALF=1750129285"
    }

    # Unpack configuration values.
    user_id = config.get("user_id")
    cookie = config.get("cookie")
    start_page = config.get("start_page", 1)
    max_pages = config.get("max_pages")
    db_path = config.get("sqlite")
    output_dir = './output'

    # Create the crawler instance.
    crawler = WeiboWebDriverCrawler(
        user_id, cookie=cookie, output_dir=output_dir, db_path=db_path
    )

    # Try the automatic (cookie-based) login first; if it fails, fall back
    # to checking whether the user completed a manual login in the browser.
    if not crawler._login():
        logging.error("首次登录失败，正在检查是否手动登录成功...")

        if crawler._check_login_status():
            logging.info("检测到用户已完成手动登录")
        else:
            logging.error("登录失败，请检查网络或手动登录后重试")
            # sys.exit is the idiomatic way to abort a script with a status
            # code; the exit() builtin is meant for interactive sessions.
            sys.exit(1)

    # Fetch the user's profile first; only crawl posts if that succeeds.
    if crawler.get_user_info():
        crawler.get_weibo_data(start_page=start_page, max_pages=max_pages)
