import os
import requests
from bs4 import BeautifulSoup
import re
import time
import random
from urllib.parse import quote

class BingImageCrawler:
    """Crawl images from Bing image search and save them under a local directory.

    Results are scraped from the search-result HTML (the `murl` fields embedded
    in the page), then each image URL is fetched and written to disk.
    """

    def __init__(self, save_dir='src/dataset', headers=None):
        """
        Args:
            save_dir: Root directory; one sub-folder per keyword is created here.
            headers: Optional HTTP headers dict; defaults to a desktop Chrome
                User-Agent so Bing serves the normal HTML page.
        """
        self.save_dir = save_dir
        self.headers = headers or {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
        }

    def create_folder(self, folder_name):
        """Create (if missing) and return the folder used to save one keyword's images."""
        folder_path = os.path.join(self.save_dir, folder_name)
        os.makedirs(folder_path, exist_ok=True)
        return folder_path

    def download_image(self, url, folder_path, index):
        """Download a single image to folder_path as image_<index>.<ext>.

        Returns:
            True on success, False on any HTTP/network failure (errors are
            printed, never raised, so one bad URL doesn't abort the crawl).
        """
        try:
            response = requests.get(url, headers=self.headers, timeout=10)
            if response.status_code == 200:
                # Derive the file extension from the Content-Type header.
                # Default to .jpg (most common) instead of mislabeling
                # unknown types as .png like before.
                content_type = response.headers.get('content-type', '')
                if 'png' in content_type:
                    ext = '.png'
                elif 'gif' in content_type:
                    ext = '.gif'
                elif 'webp' in content_type:
                    ext = '.webp'
                else:
                    ext = '.jpg'

                file_path = os.path.join(folder_path, f'image_{index}{ext}')
                with open(file_path, 'wb') as f:
                    f.write(response.content)
                print(f"成功下载图片: {file_path}")
                return True
        except Exception as e:
            print(f"下载图片失败: {url}")
            print(f"错误信息: {str(e)}")
        return False

    def crawl_images(self, keyword, num_images=100):
        """Crawl up to num_images images for keyword.

        Args:
            keyword: Search term (will be URL-encoded).
            num_images: Maximum number of images to download.

        Returns:
            The number of images actually downloaded.
        """
        folder_path = self.create_folder(keyword)
        downloaded_count = 0
        # Bing's 'first' parameter is a 1-based result OFFSET, not a page
        # number — it must advance by the number of results per page.
        offset = 1
        # Result pages can overlap; remember URLs already attempted so the
        # same image is never downloaded twice.
        seen_urls = set()
        encoded_keyword = quote(keyword)

        while downloaded_count < num_images:
            url = f"https://cn.bing.com/images/search?q={encoded_keyword}&form=HDRSC2&first={offset}"

            try:
                # timeout added so a stalled connection can't hang the crawl.
                response = requests.get(url, headers=self.headers, timeout=10)

                # Image URLs are embedded in the HTML as
                # murl&quot;:&quot;<url>&quot; fragments.
                img_pattern = r'murl&quot;:&quot;(.*?)&quot;'
                img_urls = re.findall(img_pattern, response.text)

                if not img_urls:
                    print("没有找到更多图片")
                    break

                for img_url in img_urls:
                    if downloaded_count >= num_images:
                        break
                    if img_url in seen_urls:
                        continue
                    seen_urls.add(img_url)

                    if self.download_image(img_url, folder_path, downloaded_count + 1):
                        downloaded_count += 1

                    # Random delay to avoid being rate-limited / blocked.
                    time.sleep(random.uniform(0.5, 2))

                # Advance by the page size; the old code did `page += 1`,
                # which re-fetched almost the same results every iteration.
                offset += len(img_urls)

            except Exception as e:
                print(f"爬取页面失败: {url}")
                print(f"错误信息: {str(e)}")
                break

        print(f"\n总共下载了 {downloaded_count} 张图片")
        return downloaded_count

def main():
    """Interactive entry point: prompt for a keyword and a count, then crawl."""
    # Create the crawler with default settings.
    crawler = BingImageCrawler()

    # Prompt for the search keyword and the number of images.
    keyword = input("请输入要搜索的关键词: ").strip()
    # Validate the count instead of crashing with an unhandled ValueError
    # on non-numeric input.
    try:
        num_images = int(input("请输入要下载的图片数量: "))
    except ValueError:
        print("图片数量必须是整数")
        return

    # Start crawling.
    print(f"\n开始爬取 '{keyword}' 的图片...")
    crawler.crawl_images(keyword, num_images)

if __name__ == "__main__":
    main()