#!/usr/bin/env python3
# -*- coding: utf-8 -*-

import os
import re
import time
import queue
import requests
from bs4 import BeautifulSoup
from multiprocessing import Pool


class Crawler:
    """Breadth-first image crawler.

    Starting from a seed URL, downloads every image whose ``src`` is an
    absolute http(s) link ending in a known image extension, then follows
    absolute http(s) anchors to further pages.

    NOTE(review): there is no same-domain restriction and no page limit —
    a crawl over a real site is effectively unbounded. Confirm whether a
    depth/page cap is wanted before production use.
    """

    # Per-request timeout in seconds; the original code had none, so a
    # stalled server would hang the crawl forever.
    REQUEST_TIMEOUT = 10

    def __init__(self, rootDir='D:/opt/images/'):
        # Directory images are saved into; parameterized (was hard-coded),
        # default preserves the original behavior.
        self.rootDir = rootDir
        # URLs already seen. Was a *class* attribute in the original,
        # i.e. shared mutable state across all Crawler instances — now
        # per-instance.
        self.visited = set()

    def crawler(self, seed):
        """Crawl starting from *seed*.

        The original implementation created a ``multiprocessing.Pool(4)``
        and leaked it without submitting any work; this now simply
        delegates to :meth:`fetch`.
        """
        self.fetch(seed)

    def fetch(self, seed):
        """Iterative breadth-first crawl from *seed*.

        The original version recursed once per discovered link, which
        exceeds the interpreter recursion limit on any non-trivial site.
        A FIFO queue gives the same visit semantics without recursion.
        Network/HTTP errors on a page are printed and that page skipped.
        """
        pending = queue.Queue()
        pending.put(seed)
        # Mark at enqueue time so the same URL is never queued twice.
        self.visited.add(seed)

        while not pending.empty():
            url = pending.get()
            try:
                response = requests.get(url, timeout=self.REQUEST_TIMEOUT)
                response.raise_for_status()
            except requests.RequestException as ex:
                print(ex)
                continue  # skip unreachable page, keep crawling

            soup = BeautifulSoup(response.text, "html.parser")

            # Absolute http(s) image links with a recognized extension.
            images = soup.find_all(
                'img',
                {"src": re.compile(r'(http|https)://.*\.(jpg|gif|png|jpeg|bmp|ico)')})
            self.parseImg(images)

            # Follow absolute http(s) anchors only.
            for link in soup.find_all('a', {"href": re.compile(r'(http|https)://.*')}):
                href = link['href']
                if not href or href.startswith('javascript') or href.startswith('#'):
                    continue
                if href not in self.visited:
                    self.visited.add(href)
                    pending.put(href)

    def parseImg(self, imgLinks):
        """Download each image tag in *imgLinks* into ``self.rootDir``.

        On any download/write error the partially written file (if any)
        is removed. Errors are printed, not raised, so one bad image
        does not abort the crawl.
        """
        # Original crashed if the target directory did not exist.
        os.makedirs(self.rootDir, exist_ok=True)
        for link in imgLinks:
            src = link['src']
            filename = os.path.split(src)[1]  # split URL into path + basename
            filePath = self.rootDir + filename
            try:
                response = requests.get(src, timeout=self.REQUEST_TIMEOUT)
                response.raise_for_status()
                # ``with`` guarantees the handle is closed (original leaked it).
                with open(filePath, 'wb') as out:
                    out.write(response.content)
                print('saved img[{}] from {}'.format(filename, src))
            except (requests.RequestException, OSError) as ex:
                # Narrowed from BaseException, which also swallowed
                # KeyboardInterrupt and SystemExit.
                print(ex)
                if os.path.exists(filePath):
                    os.remove(filePath)


if __name__ == '__main__':
    # Entry point: build a crawler and start from the seed page.
    bot = Crawler()
    bot.fetch('http://image.baidu.com/')
