import requests
import re
import os
import time
from bs4 import BeautifulSoup
from datetime import datetime

import threading
import queue


class ThreadCrawl(threading.Thread):
    """Worker thread that takes page URLs from pageQueue and is intended to
    push scraped results into dataQueue (the run() method is not visible in
    this chunk)."""

    def __init__(self, threadName, pageQueue, dataQueue):
        super(ThreadCrawl, self).__init__()
        # Thread name (identifies this worker)
        self.threadName = threadName
        # Queue of page URLs to crawl
        self.pageQueue = pageQueue
        # Queue receiving scraped data
        self.dataQueue = dataQueue
        # Request headers.
        # BUG FIX: the original wrote {'Mozilla/5.0 ...'} with no key, which
        # Python parses as a *set* literal, not a dict — requests would reject
        # it when passed as headers=. The UA string must be the value of the
        # 'User-Agent' key.
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.110 Safari/537.36'}


# Root URL of the target site (usth.edu.cn)
urlRoot = "http://www.usth.edu.cn/"
# First listing page to crawl; main() derives the total page count from it
urlList = ["http://www.usth.edu.cn/kdyw.htm"]


def main():
    """Discover the total page count of the news listing and enqueue every page URL.

    Fetches the first listing page, reads the highest page number from the
    last ``<span class="p_no">`` element, then fills a queue with all page
    URLs (first the entry page, then pages lastPage-1 down to 1).

    Returns:
        queue.Queue: the populated page-URL queue. The original version
        built the queue and implicitly returned None, discarding the work;
        returning it is backward-compatible and lets callers hand the queue
        to ThreadCrawl workers.

    Raises:
        requests.HTTPError: if the listing page responds with an error status.
    """
    # Fetch the entry page; timeout so a hung server cannot block forever.
    response = requests.get(urlList[0], timeout=10)
    # Fail loudly on HTTP errors instead of silently parsing an error page.
    response.raise_for_status()
    response.encoding = 'utf-8'
    soup = BeautifulSoup(response.text, 'html.parser')
    # The last <span class="p_no"> on the page holds the highest page number.
    lastPage = int(soup.findAll("span", {"class": "p_no"})[-1].text)

    pageQueue = queue.Queue(lastPage)
    # Page 1 is the entry URL; the remaining pages follow the kdyw/<n>.htm
    # scheme, enqueued from lastPage-1 down to 1 (1 + (lastPage-1) items,
    # exactly filling the queue's capacity).
    pageQueue.put(urlList[0])
    for pageNo in range(lastPage - 1, 0, -1):
        pageQueue.put("http://www.usth.edu.cn/kdyw/" + str(pageNo) + ".htm")
    return pageQueue


# Run the crawler only when executed as a script, not when imported.
if __name__ == "__main__":
    main()
