
# A scraper that collects textbook cover images and tables of contents.
# Install the requests library with:  pip3 install requests
import requests
# Install the BeautifulSoup library with:  pip3 install bs4
from bs4 import BeautifulSoup
# Catalogue pages to scrape: page 1 and page 2 of the student
# "follow the text" section. Open the links in a browser to inspect them.
url = "http://www.xdstudy.com/h5/category/student/followthetext/"
url2 = "http://www.xdstudy.com/h5/category/student/followthetext/?page=2"
urlList = [url, url2]
# Maps page index (0-based) -> list of book dicts scraped from that page.
allOfshu = {}

for i, urlItem in enumerate(urlList):
    response = requests.get(urlItem, timeout=30)
    # Fail loudly on HTTP errors instead of trying to parse an error page.
    response.raise_for_status()
    htmlContent = response.content.decode("utf-8")
    soup = BeautifulSoup(htmlContent, 'lxml')
    container = soup.find('div', class_="container")
    lesson_item = container.find_all('div', class_="lesson-item")
    books = []
    for item in lesson_item:
        # BUG FIX: build a FRESH dict per book. The original reused a single
        # dict across the loop, so every book overwrote the previous one and
        # only the last book of each page survived into allOfshu.
        shu = {}
        imgTag = item.find("img")
        shu["imgsrc"] = imgTag['src']          # cover image URL
        h3Tag = item.find("h3", class_="title")
        shu["title"] = h3Tag.text              # book title
        # Table of contents: one {lesson_id: "num name"} entry per <a> tag.
        mulu = []
        for aaa in item.find_all("a"):
            a = aaa.find("span", class_="lesson-num").text
            b = aaa.find("span", class_="lesson-name").text
            # href is split on "/"; segment 3 is presumably the lesson id —
            # TODO confirm against the site's URL layout.
            c = aaa['href'].split("/")[3]
            mulu.append({c: a + " " + b})
        shu["mulu"] = mulu
        print(shu)
        books.append(shu)
    allOfshu[i] = books

print(allOfshu)




