import os
import random
import re
import sys
import time
import requests
import zhconv
from bs4 import BeautifulSoup

# Example target URL, kept for quick manual testing:
# page_url = "https://h-ciyuan.com/"
page_url = input("输入要抓取的网址 ")  # prompt (Chinese): "Enter the URL to scrape"
# Desktop Chrome User-Agent so the site serves its normal HTML page
# instead of blocking the default requests UA.
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/107.0.0.0 Safari/537.36'
}


def get_imgs_url(imgs_list):
    """Extract the src URL of every <img> tag.

    Fixes two defects of the original:
    - ``strip("//")`` treats its argument as a character set, stripping ALL
      leading/trailing ``/`` characters (mangling e.g. ``"/logo/"`` into
      ``"logo"``); only the protocol-relative ``//`` prefix should go.
    - an <img> without a ``src`` attribute raised KeyError and aborted the
      whole extraction; such tags are now simply skipped.

    :param imgs_list: iterable of tag-like objects supporting ``.get("src")``
    :return: list of URL strings with any leading ``//`` removed
    """
    urls = []
    for img in imgs_list:
        src = img.get("src")
        if not src:  # missing or empty src: nothing to download
            continue
        if src.startswith("//"):  # protocol-relative URL: drop the prefix only
            src = src[2:]
        urls.append(src)
    return urls


def optimize(str_o):
    """Sanitize a page title for filesystem use and normalize its Chinese.

    Characters outside whitespace / digits / ``.`` / ASCII letters / CJK
    ideographs / Japanese kana / ``-`` / ``_`` are replaced with ``_``; the
    pattern also collapses underscore/whitespace runs, trims leading and
    trailing ``_``, and removes ``_-_`` / ``-_-`` combos and full-width
    punctuation. Traditional Chinese is converted to Simplified.
    """
    pattern = re.compile(r"[^\s0-9.A-Za-z\u4E00-\u9FA5\u3040-\u30FF\u31F0-\u31FF-_\n]|_{2,}|\s{2,}|^_|_$|_-_|-_-|[，。？！]")
    # Two passes on purpose: the first substitution can create fresh "__"
    # runs, which only the second pass collapses.
    cleaned = pattern.sub("_", pattern.sub("_", str_o))
    cleaned = cleaned.strip(" ")  # trim surrounding plain spaces
    return zhconv.convert(cleaned, "zh-cn")  # Traditional -> Simplified Chinese


# Locate a known image extension in a URL.
def get_ext_index(imgurl):
    """Return the index where the first recognized extension starts, or -1.

    NOTE(review): "webp" and "raw" deliberately(?) lack a leading dot —
    the caller slices exactly 4 characters from this index, which only
    lines up because of that; verify before "fixing". "raw" can also
    false-positive on a mid-URL substring. ".jpeg" is not recognized.
    """
    known_exts = (".jpg", ".png", ".gif", ".bmp", "webp", "raw")
    for ext in known_exts:
        pos = imgurl.rfind(ext)
        if pos != -1:
            return pos
    return -1


# Download one image into the folder named after the page title.
def download(pic_url):
    """Fetch ``pic_url`` and save it under the ``page_title`` directory.

    Relies on the module-level ``page_title`` and ``headers`` globals.
    Raises requests exceptions on network errors / timeouts and HTTPError
    on non-2xx responses (callers catch and report).
    """
    resp = requests.get(pic_url, headers=headers, timeout=3)
    resp.raise_for_status()  # don't save a 404/500 error page as an image
    img_name = pic_url[pic_url.rfind("/") + 1:]  # filename = last path segment
    # exist_ok avoids the check-then-create race of the original code
    os.makedirs(page_title, exist_ok=True)
    full_name = os.path.join(page_title, img_name)
    with open(full_name, "wb") as f:  # 'with' closes the file; no manual close
        f.write(resp.content)


# Fetch the target page and collect all <img> tags.
response = requests.get(page_url, headers=headers, timeout=10)  # timeout: never hang forever
html = response.text
bs = BeautifulSoup(html, 'html.parser')
imgs = bs.find_all('img')
# Scheme prefix of the page, e.g. "https://" (used to absolutize bare URLs).
protocol_str: str = page_url[0:page_url.find('//') + 2]
# Guard: pages without a <title> made the original crash with AttributeError.
title_tag = bs.find("title")
page_title = title_tag.text if title_tag is not None else "downloaded_images"
# page_title = optimize(page_title) # optional: scrub special characters
print(page_title)

# Fix: the original initialized imgs_url to "" (a str); keep it a list so the
# error path leaves downstream len()/indexing type-consistent.
imgs_url = []
try:
    imgs_url = get_imgs_url(imgs)
except KeyError:
    print("获取数据异常")
dis_downable = []

# Normalize each URL: cut everything after the extension (query strings etc.;
# assumes a 4-char-wide match starting at the returned index), then prefix the
# page's scheme onto protocol-relative / schemeless URLs.
for i, url in enumerate(imgs_url):
    ext_index = get_ext_index(url)
    url = url[:ext_index + 4]
    if not url.startswith('http'):
        url = protocol_str + url
    imgs_url[i] = url

# Partition into downloadable URLs and unusable ones (inline data URIs or no
# recognizable extension). Single pass instead of the original collect-then-
# list.remove loop, which was O(n^2).
downable = []
for x in imgs_url:
    if "data:image" in x or get_ext_index(x) == -1:
        dis_downable.append(x)
        print("移除", x[:50], "...")
    else:
        downable.append(x)
imgs_url = downable

print("可下载图片", len(imgs_url))
print("不可用", len(dis_downable))

# Download every surviving URL with a small random delay between requests.
for x in imgs_url:
    print(x)
    try:
        download(x)
    # Fix: the original bare `except:` also swallowed KeyboardInterrupt /
    # SystemExit, making Ctrl-C unable to stop the loop, and hid the error.
    except Exception as e:
        print("下载异常", e)
    wait_time = random.randint(1, 5) / 10  # polite delay: 0.1-0.5 s
    sys.stdout.flush()
    time.sleep(wait_time)
