"""
from urllib.request import urlopen

url=r'http://www.baidu.com'
with urlopen(url) as fp:
    #print(fp.read())
    print(fp.read().decode())
"""

"""#GET请求编码
from urllib.parse import urlencode
params = urlencode({'spam':1,'egg':2})
print(params)
url="http://www.musi-cal.com/cgi-bin/query?%s" % params
print(url)
"""

from os import makedirs, mkdir
from os.path import basename, isdir
from re import S, findall, sub  # S: single-line mode, lets '.' match newlines
from urllib.parse import urljoin
from urllib.request import urlopen

import requests
from bs4 import BeautifulSoup as bs

# Crawl a chain of campus-news articles: save each article's images and text,
# then follow the "previous article" (上一条) link until none is left.
url = r'http://www.sdtbu.edu.cn/info/1043/24108.htm'  # starting article page

while True:

    # Fetch the page once; every extraction step below reuses this HTML.
    with urlopen(url) as fp:
        content = fp.read().decode()

    # Relative URLs of all images emitted by the CMS template (width attr first).
    pattern = r'<img width=.*?src="(.+?)"'
    img_urls = findall(pattern, content)

    # The article title is the first <h1> element.
    pattern = r'<h1.*?>(.*?)</h1>'
    title = findall(pattern, content)[0]
    print(title)

    # One folder per article; makedirs also creates missing parent directories
    # (plain mkdir would fail if D:\\山东工商学院 did not exist yet).
    destDir = r'D:\\山东工商学院'+'\\'+title
    makedirs(destDir, exist_ok=True)

    # Download every picture, keeping its original file name.
    for picUrl in img_urls:
        picUrl = urljoin(url, picUrl)  # resolve relative src against the page URL
        with urlopen(picUrl) as fpUrl:
            with open(destDir+'\\'+basename(picUrl), 'wb') as fp:
                fp.write(fpUrl.read())

    # Extract paragraph text; the S flag lets '.' match newlines inside <p>...</p>.
    pattern = r'<p.*?>(.*?)</p>'
    paragraphs = findall(pattern, content, S)
    # Explicit utf-8: the default platform codec (e.g. gbk on Windows) may not
    # encode every character in the article text.
    with open(destDir+'\\'+title+'.txt', 'w', encoding='utf-8') as filep:
        for para in paragraphs:
            # Drop HTML tags, 【...】 bracketed notes and &nbsp; entities.
            para = sub(r'<.*?>|【.*?】|&nbsp|;', '', para)
            para = para.strip()
            # Skip empty lines and the prev/next navigation captions.
            if para != '' and (not para.startswith(('上一条', '下一条'))):
                filep.write(para+'\n')

    # Follow the "previous article" link; stop cleanly when the chain ends
    # (explicit emptiness check instead of a bare except around [0]).
    pattern = r'上一条：<a href="(.*?)"'
    prev_links = findall(pattern, content)
    if not prev_links:
        break
    url = urljoin(url, prev_links[0])
    print(url)

# Same title extraction, done with requests + BeautifulSoup instead of regex.
url = r'http://www.sdtbu.edu.cn/info/1043/24111.htm'
web_data = requests.get(url)
web_data.encoding = 'utf-8'  # declare the page charset before reading .text
soup = bs(web_data.text, 'lxml')
# select() takes a single CSS selector string; an id filter belongs inside the
# selector ('h1#test_dir'), not as a keyword argument — select(id=...) is
# rejected by BeautifulSoup. Returns a (possibly empty) list of matches.
titles = soup.select('h1#test_dir')
for heading in titles:
    print(heading.text)






