import sys
import os
from bs4 import BeautifulSoup
import re
import time

import requests
import urllib.request
import urllib.parse
import lxml
import shutil



# header = {
#         'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 UBrowser/6.1.2107.204 Safari/537.36'
#     }
 
# Browser-style User-Agent header sent with every request.
header = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/129.0.0.0 Safari/537.36'
}



# Bing image-search async endpoint. Format slots (see getStartHtml/main):
# {0}=query keyword, {1}=index of first result, {2}=page size, {3}=SFX page counter.
url = "https://cn.bing.com/images/async?q={0}&first={1}&count={2}&scenario=ImageBasicHover&datsrc=N_I&layout=ColumnBased&mmasync=1&dgState=c*9_y*2226s2180s2072s2043s2292s2295s2079s2203s2094_i*71_w*198&IG=0D6AD6CBAF43430EA716510A4754C951&SFX={3}&iid=images.5599"
 
# Search keyword for the images to crawl.

#name="aborad baby dance real people iamge"
#name="aborad people face image"
name="Pet rabbit image"




# Local directory where downloaded images are stored.
path = "./"+name 
 


def get_image_format(content):
    """Identify a common image format from the file's magic bytes.

    Args:
        content: raw bytes of a downloaded file; only the first few
            bytes are inspected.

    Returns:
        A lowercase extension string ("jpg", "png", "gif", "bmp",
        "webp") or None when the signature is not recognized.
    """
    if content[:2] == b'\xff\xd8':
        return "jpg"   # JPEG SOI marker
    if content[:4] == b'\x89PNG':
        return "png"   # PNG signature
    if content[:3] == b'GIF':
        return "gif"   # GIF87a / GIF89a
    if content[:2] == b'BM':
        return "bmp"   # Windows bitmap
    # WebP images live inside a RIFF container: 'RIFF' <size> 'WEBP'.
    if content[:4] == b'RIFF' and content[8:12] == b'WEBP':
        return "webp"
    return None


def getStartHtml(url, key, first, loadNum, sfx):
    """Fetch one thumbnail list page from the Bing image async endpoint.

    Formats ``url`` with the query key, start index, page size and SFX
    counter, then opens it with the module-level browser headers.

    Returns:
        The response object produced by ``urllib.request.urlopen``.
    """
    print("getStartHtml ...")
    request = urllib.request.Request(
        url.format(key, first, loadNum, sfx),
        headers=header,
    )
    response = urllib.request.urlopen(request)
    print("getStartHtml end:")
    return response
 
def findImgUrlFromHtml(html, rule, url, key, first, loadNum, sfx, count):
    """Extract original-image URLs from one thumbnail list page and download them.

    Parses ``html`` with BeautifulSoup, pulls the ``"murl":"..."`` field out of
    every ``<a class="iusc">`` element via ``rule``, and hands each bare URL to
    ``getImage``.

    Args:
        html: response object / markup for one thumbnail list page.
        rule: compiled regex matching '"murl":"http...' in the element markup.
        url, key, first, loadNum, sfx: unused here; kept for interface
            compatibility with existing callers.
        count: number of images successfully saved so far.

    Returns:
        The updated running total of saved images (NOT the per-page delta).
    """
    soup = BeautifulSoup(html, "lxml")
    link_list = soup.find_all("a", class_="iusc")
    for link in link_list:
        # Search the element's markup; skip elements without a murl field
        # instead of swallowing every exception with a bare except.
        result = rule.search(str(link))
        if result is None:
            print("error findImgUrlFromHtml")
            continue
        # Strip the leading '"murl":"' (8 chars) to leave the bare image URL.
        # Use a local name so the `url` parameter is not clobbered.
        img_url = result.group(0)[8:]
        if getImage(img_url, count) == 1:  # count only successful downloads
            count += 1
    # Done with this page; the caller moves on to the next one.
    return count
 
# '''从原图url中将原图保存到本地'''
# def getImage(url,count):
#     try:
#         time.sleep(0.5)
#         print("request ",url)
#         urllib.request.urlretrieve(url,path+'/'+str(count+1)+'.jpg')
#         print("request end",url)
#     except Exception :
#         time.sleep(1)
#         print("产生了一点点错误，跳过...")
#     else:
#         print("图片+1,成功保存 " + str(count+1) + " 张图")
 

def getImage(url, count, retries=1):
    """Download one image and save it under ``path`` as ``<count+1>.<ext>``.

    The file extension is sniffed from the content's magic bytes via
    ``get_image_format``; unrecognized content is skipped.

    Args:
        url: direct URL of the original image.
        count: number of images saved so far; the new file is named count+1.
        retries: number of download attempts before giving up.

    Returns:
        1 on success, -1 when the download failed or the content is not a
        recognized image format.
    """
    for attempt in range(retries):
        try:
            time.sleep(1)  # throttle request rate
            print("请求中: ", url)
            # Send the same browser User-Agent used for the list pages;
            # image hosts commonly reject the default python UA.
            response = requests.get(url, headers=header, timeout=10)
            response.raise_for_status()  # raise on HTTP error status

            # Sniff the real format from the magic bytes.
            image_format = get_image_format(response.content)
            if image_format is None:
                print("未知的图片格式，跳过...")
                return -1

            # Save the image with the sniffed extension.
            with open(os.path.join(path, f'{count + 1}.{image_format}'), 'wb') as f:
                f.write(response.content)

            print("图片+1, 成功保存 " + str(count + 1) + " 张图")
            return 1  # success
        except requests.RequestException as e:
            print(f"尝试 {attempt + 1}/{retries} 失败，错误: {e}")
            time.sleep(0.5)  # brief pause before the next attempt
    print("产生了一点点错误，跳过...")
    return -1

def main():
    """Crawl Bing image search for ``name`` and save up to ~500 images."""
    key = urllib.parse.quote(name)
    first = 1
    loadNum = 35
    sfx = 1
    count = 0
    # Matches '"murl":"http...' up to (not including) the closing quote.
    rule = re.compile(r"\"murl\"\:\"http\S[^\"]+")
    # Create the output directory on first run.
    if not os.path.exists(path):
        os.makedirs(path)
    # Stop once roughly 500 images have been saved.
    while count < 500:
        html = getStartHtml(url, key, first, loadNum, sfx)
        # findImgUrlFromHtml takes the running total and returns the *new*
        # total, so assign rather than add — the old `count +=` double-counted
        # every image already saved and terminated the loop far too early.
        count = findImgUrlFromHtml(html, rule, url, key, first, loadNum, sfx, count)
        print("count", count)
        first = count + 1
        sfx += 1

if __name__ == '__main__':
    main()

