"""
@author: chen
@file: 正则表达式作业.py
@time: 2023/6/4 15:04
"""

# 1.验证ipv4地址合法性 1~255.1~255.1~255.1~255
# 2.爬取三创官网所有图片

import re

msg = """
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
    link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
    inet 127.0.0.1/8 scope host lo
       valid_lft forever preferred_lft forever
    inet6 ::1/128 scope host 
       valid_lft forever preferred_lft forever
2: ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP group default qlen 1000
    link/ether 00:0c:29:1e:73:3e brd ff:ff:ff:ff:ff:ff
    inet 192.168.1.54/24 brd 192.168.1.255 scope global noprefixroute ens33
       valid_lft forever preferred_lft forever
    inet6 fe80::20c:29ff:fe1e:733e/64 scope link 
       valid_lft forever preferred_lft forever
"""
# Extract the IPv4 address between "inet " and "/24" from the `ip addr`
# output above, then validate every octet against the task's 1~255 range.
# Run findall once instead of twice (the original evaluated it in both the
# condition and the body).
matches = re.findall(r"(?<=inet ).*?(?=/24)", msg)
flag = 1
if matches:
    ip = matches[0].split(".")
    print(ip)
    for octet in ip:
        # isdigit() guards against non-numeric matches that would make
        # int() raise; the task (see header comment) requires octets 1~255,
        # so 0 is rejected as well (the original accepted 0).
        if not octet.isdigit() or not 1 <= int(octet) <= 255:
            flag = 0
else:
    # No "inet x.x.x.x/24" line at all: nothing to validate, so the
    # address cannot be declared valid (the original printed "yes" here).
    flag = 0
if flag:
    print("yes")
else:
    print("no")



import re
import requests
import os
from bs4 import BeautifulSoup
from urllib.parse import urljoin

# Target site to scrape for images.
url = "https://www.sanchuangedu.cn/"
# Fetch the page and keep its HTML text.
response = requests.get(url)
content = response.text
print(content)
# Match every <img ... src="..."> and capture ONLY the text inside the
# quotes. The original pattern r'<img src=(.+?) width="400"' captured the
# surrounding quotes too, which produced broken download URLs and invalid
# local filenames, and it silently skipped any <img> without width="400".
img_urls = re.findall(r'<img\s+[^>]*?src="(.+?)"', content)
print(img_urls)
# Download all matched images to the current directory.
count = 0
for img_url in img_urls:
    # urljoin handles absolute URLs, root-relative paths ("/a/b.png") and
    # plain relative paths correctly; naive `url + img_url` did not.
    img_url_w = urljoin(url, img_url)
    print(img_url_w)
    img_data = requests.get(img_url_w).content
    # Save under the URL's basename so path separators in the URL can't
    # produce an invalid or directory-escaping local path.
    filename = os.path.basename(img_url) or f"image_{count}"
    with open(filename, "wb") as f:
        f.write(img_data)
    count += 1