# 爬取微博周口师范学院超话的帖子数据（目标不少于1万条，与下方要求一致）；
# 微博周口师范学院超话；
# 分析页面，结合正则化，爬取帖子数据，保存为csv文件，字段不限，数据量不少于1w条

# 步骤如下:
# 目标爬取微博周口师范学院超话的帖子数据
# 1.加载api文件
# 2.初始化好selenium
# 3.通过selenium点击和搜索从网页源代码中分析出帖子的信息
# 4.导入D.csv文件

import csv
import time

from selenium.webdriver import Edge
from selenium.webdriver.common.by import By

# Read the target URL from the local api.txt file (first line only).
# strip() drops the trailing newline readline() keeps, which would
# otherwise be sent to the browser as part of the URL.
with open("api.txt", mode="r", encoding="utf-8") as u:
    url = u.readline().strip()

# Initialize the Selenium Edge driver and open the target page.
web = Edge()
web.get(url)
time.sleep(3)

# Click the login entry point.
# NOTE(review): absolute XPaths are brittle — they break whenever Weibo
# changes its markup; confirm they still match the live page.
el = web.find_element(By.XPATH, '/html/body/div[1]/div/div[1]/div/div/div[3]/div[2]/ul/li[3]/a')
el.click()
time.sleep(3)

# Switch to QR-code login, then wait for the user to scan the code.
el = web.find_element(By.XPATH, '/html/body/div[10]/div[2]/div[3]/div[1]/a[2]')
el.click()
time.sleep(16)


def get_dynamic():
    """Scrape poster names and post texts, paging through the topic feed.

    For each of up to 251 pages: scroll to the bottom three times so
    lazily loaded posts render, extract every visible post card, write a
    (name, text) row through the module-level ``csvwriter``, then click
    the next-page button. Stops early when no next-page button exists.

    Relies on the module-level ``web`` driver and ``csvwriter`` writer;
    returns None.
    """
    for page in range(251):
        # Scroll to the bottom repeatedly to trigger lazy loading.
        for _ in range(3):
            web.execute_script("window.scrollTo(0, document.body.scrollHeight);")
            time.sleep(2)
        divs = web.find_elements(By.XPATH, '//*[@class="WB_cardwrap WB_feed_type S_bg2 WB_feed_like"]')
        for div in divs:
            # Skip malformed cards instead of raising IndexError on [0].
            names = div.find_elements(By.XPATH, './/*[@class="W_f14 W_fb S_txt1"]')
            texts = div.find_elements(By.XPATH, './/*[@class="WB_text W_f14"]')
            if not names or not texts:
                continue
            csvwriter.writerow([names[0].text, texts[0].text])
        time.sleep(1)
        # Advance to the next page; finish cleanly when the button is
        # absent (last page) instead of raising NoSuchElementException.
        next_buttons = web.find_elements(By.XPATH, '//*[@class="page next S_txt1 S_line1"]')
        if not next_buttons:
            break
        next_buttons[0].click()
        time.sleep(2)


if __name__ == '__main__':
    # Time the whole scrape.
    t1 = time.time()
    # newline="" stops the csv module from emitting blank rows on Windows;
    # the with-block guarantees the file is closed even if scraping raises.
    with open("bak/D.csv", mode="w", encoding="utf-8", newline="") as f:
        csvwriter = csv.writer(f)
        csvwriter.writerow(["用户名", "用户帖子"])
        # Run the scrape (writes rows via the module-level csvwriter).
        get_dynamic()
    t2 = time.time()
    print("over!")
    print(f"花费时间为:{t2 - t1:.2f}秒")
