'''打开页面 https://www.tute.edu.cn/xysh/yxtzsd.htm，使用爬虫技术
读出页面代码，并保存用户设定的图片数在本地文件夹（尝试先保存一张
图）。 
例如，用户输入 10，则保存 10 张。（如果不好做，就做成下载固定的张数）
提示：如果上述网址抓取不到内容或出现错误，就换成别的网站（例如：
https://www.nipic.com/topic/show_27313_1.html），找一个带图片的页面即可。'''
import os
import re
import urllib.error
import urllib.request  # fetch web page data

from bs4 import BeautifulSoup  # HTML parsing (used by the commented-out helpers)
# def get_html(url):
#     try:
#         response=urllib.request.urlopen("https://www.tute.edu.cn/xysh/yxtzsd.htm")
#         print(response.read().decode('utf-8'))#对获取到的网页源码进行解码
#     except urllib.error.URLError as e:
#         print(e)
#         return None
def askURL(url):
    """Fetch *url* and return its body decoded as UTF-8, or "" on failure.

    Sends a desktop-browser User-Agent header because some sites reject
    requests carrying urllib's default agent string.

    :param url: page address to download
    :return: decoded HTML text, or the empty string if the request failed
    """
    # Disguise the request as a regular browser.
    head = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/128.0.0.0 Safari/537.36"}
    request = urllib.request.Request(url, headers=head)
    html = ""
    try:
        # Context manager closes the connection even if read/decode raises.
        with urllib.request.urlopen(request) as response:
            html = response.read().decode("utf-8")
    except urllib.error.URLError as e:
        # Best-effort diagnostics; callers receive "" and decide what to do.
        if hasattr(e, "code"):
            print(e.code)
        if hasattr(e, "reason"):
            print(e.reason)
    return html

#在网页中找到图片
# def find_pictures(html):
#     soup = BeautifulSoup(html, 'html.parser')
#     pictures = []
#     for img in soup.find_all('img'):
#         src = img.get('src')
#         if src:
#             pictures.append(src)
#     return pictures
def find(location, count=10):
    """Extract up to *count* ``.jpg`` image URLs from the HTML text *location*.

    The original implementation walked the string backwards character by
    character and looped (or crashed) when the page held fewer than ten
    images; this version scans with a regex and stops cleanly.

    :param location: raw HTML source to search
    :param count: maximum number of URLs to return (default 10, as before)
    :return: list of absolute image URLs, possibly shorter than *count*
    """
    base = "https://www.tute.edu.cn"
    pictures = []
    # Every double-quoted attribute value ending in ".jpg" (src, href, ...).
    for src in re.findall(r'"([^"]+\.jpg)"', location):
        # Site-relative paths are resolved against the site root;
        # already-absolute URLs are kept untouched.
        pictures.append(src if src.startswith(("http://", "https://")) else base + src)
        if len(pictures) >= count:
            break
    return pictures
def main():
    """Download up to 10 .jpg images from the campus-news page into ./photo.

    Fetches the page, extracts image URLs, and saves each image as
    ``./photo/<n>.jpg``.  A failed individual download is reported and
    skipped instead of aborting the whole run.
    """
    # NOTE: the original string had a leading space, which corrupts the URL.
    baseurl = "https://www.tute.edu.cn/xysh/yxtzsd.htm"
    html = askURL(baseurl)
    pictures = find(html)
    # Create the target folder up front; open() does not create directories.
    os.makedirs("./photo", exist_ok=True)
    for index, pic_url in enumerate(pictures, start=1):
        try:
            # Context manager releases the connection after reading the bytes.
            with urllib.request.urlopen(pic_url) as response:
                picture = response.read()
            # Write the raw image bytes to a numbered file.
            with open("./photo/%d.jpg" % index, "wb") as file:
                file.write(picture)
        except urllib.error.URLError as e:
            print("Error downloading %s: %s" % (pic_url, e))

if __name__ == "__main__":
    main()
# url="https://www.tute.edu.cn/xysh/yxtzsd.htm"
# # 创建一个Request对象
# req1 = urllib.request.Request(url)
#
# # 添加一个user-agent头部，模拟浏览器请求
# req1.add_header('User-Agent', 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/104.0.0.0 Safari/537.36')
#
# # 打开URL并获取响应对象
# response1 = urllib.request.urlopen(req1)
#
# # 读取响应内容
# html = response1.read().decode('utf-8')
#
# # 打印HTML内容
# print(html)
#
