# This script crawls xzbu.com with Python's Selenium library, which drives a real
# browser for fully automated scraping. Selenium overview (Chinese):
# https://www.cnblogs.com/zhaof/p/6953241.html  Installation guide:
# https://www.cnblogs.com/skymyyang/p/7099069.html
# NOTE: run with a stable network connection — a full crawl can take days.
import time

from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver import ActionChains
from selenium.webdriver.common.by import By

# Search xzbu.com for the keyword "教学质量" (teaching quality), then walk the
# result pages and append each article's title and body text to data.txt.
browser = webdriver.Chrome()
browser.get("https://www.xzbu.com/")

# Type the search keyword into the search box and submit it.
key = browser.find_element(By.NAME, "key")
key.send_keys("教学质量")
# Selenium 4 removed find_element_by_class_name; use the By locator API
# (consistent with the By.NAME lookup above).
browser.find_element(By.CLASS_NAME, "but").click()

# We are now on the first results page; derive the URLs of result pages 1..19
# from the current URL (the site paginates as <base>/<page-number>).
time.sleep(1)
url = browser.current_url
print(url)
search_urls = ['{}/{}'.format(url, num) for num in range(1, 20)]

for page_url in search_urls:
    print(page_url)
    # BUG FIX: the original never navigated to the page URL it had just built,
    # so it re-scraped the first results page on every iteration.
    browser.get(page_url)
    # XPaths of the (up to) 15 article links listed on one results page.
    link_xpaths = ['/html/body/div[4]/div[1]/div[1]/div[2]/ul/li[{}]/a'.format(num)
                   for num in range(1, 16)]
    for xpath in link_xpaths:
        try:
            link = browser.find_element(By.XPATH, xpath)
        except NoSuchElementException:
            # Fewer than 15 results on this page — move on to the next page
            # instead of crashing the whole crawl.
            break
        link.click()
        title = browser.find_element(By.XPATH, '//div[@class="article_leftBox"]/h2').text + "\n"
        content = browser.find_element(By.XPATH, '//div[@class="article_leftBox"]/p').text + "\n\n\n"
        # Append as UTF-8 bytes so repeated/resumed runs accumulate into one file.
        with open('data.txt', 'ab') as file:
            file.write(title.encode(encoding='UTF-8', errors='strict'))
            file.write(content.encode(encoding='UTF-8', errors='strict'))
        time.sleep(1)
        # Return to the results page so the next link XPath resolves.
        browser.back()