# coding:utf-8
import re
import sys

from bs4 import BeautifulSoup

# Python 2 hack: force the process-wide default string encoding to UTF-8 so
# implicit str<->unicode conversions of the Chinese log text don't raise
# UnicodeDecodeError. reload() restores the setdefaultencoding attribute,
# which is normally removed from sys at interpreter startup.
reload(sys)
sys.setdefaultencoding("utf-8")

import os
import urllib2

# urljoin resolves both absolute ("http://...") and relative ("../x.html")
# hrefs against a base URL; import is Python 2/3 compatible.
try:
    from urlparse import urljoin
except ImportError:
    from urllib.parse import urljoin


class UrlManager():
    """Tracks URLs waiting to be crawled (news) and already crawled (olds)."""

    def __init__(self):
        self.news = set()  # URLs queued but not yet handed out
        self.olds = set()  # URLs already handed out via get()

    def add(self, rootUrl):
        """Queue a single URL, skipping None and URLs already seen.

        The seen-checks were commented out in the original; without the
        `olds` guard the crawler re-queues pages it has already processed
        and can loop forever on cyclic link graphs.
        """
        if rootUrl is None:
            return
        if rootUrl in self.news or rootUrl in self.olds:
            return
        self.news.add(rootUrl)

    def hasUrl(self):
        """Return True while at least one URL is still waiting."""
        return len(self.news) != 0

    def addList(self, urls):
        """Queue every URL from an iterable; None or empty input is a no-op."""
        if not urls:
            return
        for url in urls:
            self.add(url)

    def get(self):
        """Pop an arbitrary pending URL and mark it as crawled."""
        url = self.news.pop()
        self.olds.add(url)
        return url


class ResLoader():
    """Downloads the raw body of a URL via urllib2."""

    def load(self, url):
        """Fetch *url* and return the response body.

        Returns None when url is None or the HTTP status is not 200.
        The response object is always closed — the original never closed
        it, leaking one socket per fetched page.
        """
        if url is None:
            return None
        response = urllib2.urlopen(url)
        try:
            if response.getcode() != 200:
                return None
            return response.read()
        finally:
            response.close()


class ResParser():
    """Extracts page links and image URLs from downloaded HTML."""

    def parse(self, baseUrl, data):
        """Return (urls, imgs): absolute .html link URLs and jpg/png srcs."""
        urls = self.getUrls(baseUrl, data)
        imgs = self.getImgs(baseUrl, data)
        return urls, imgs

    def getUrls(self, baseUrl, data):
        """Collect absolute URLs of all <a href="*.html"> links.

        Uses urljoin instead of the original `baseUrl + "/" + href`
        concatenation, which mangled absolute hrefs ("http://...") and
        relative paths ("../x.html").
        """
        urls = set()
        soup = BeautifulSoup(data, 'html.parser', from_encoding='utf-8')
        urlPattern = re.compile(r".*\.html", re.I)
        for link in soup.find_all('a', href=urlPattern):
            urls.add(urljoin(baseUrl, link['href']))
        return urls

    def getImgs(self, baseUrl, data):
        """Collect src attributes of <img> tags pointing at jpg/png files."""
        imgs = set()
        soup = BeautifulSoup(data, 'html.parser', from_encoding='utf-8')
        srcPattern = re.compile(r"http://.*\.(jpg|png)", re.I)
        for link in soup.find_all('img', src=srcPattern):
            imgs.add(link['src'])
        return imgs


class ResOuter():
    """Accumulates image URLs and downloads them into the res/ directory."""

    def __init__(self):
        self.imgList = set()  # de-duplicated image URLs collected so far

    def collect(self, imgs):
        """Merge an iterable of image URLs into the pending set; None is a no-op."""
        if imgs is None:
            return
        for img in imgs:
            self.imgList.add(img)

    def save(self):
        """Download every collected image into res/ and log each URL to 123.txt.

        Fixes over the original: the manifest handle was rebound to each
        per-image handle inside the loop, so the manifest and every image
        file leaked and only the last opened file was closed; os.chdir
        permanently changed the process working directory. Now each handle
        is closed and paths are joined explicitly.
        """
        path = 'res'
        if not os.path.exists(path):
            os.mkdir(path)
        manifest = open(os.path.join(path, '123.txt'), 'w')
        try:
            for img in self.imgList:
                manifest.write("img=" + img + "\n")
                # use the last URL path segment as the local file name
                fileName = img.split('/')[-1]
                response = urllib2.urlopen(img)
                try:
                    data = response.read()
                finally:
                    response.close()
                out = open(os.path.join(path, fileName), 'wb')
                try:
                    out.write(data)
                finally:
                    out.close()
                print("已下载图片: %s" % fileName)
        finally:
            manifest.close()


class ImgSpider():
    def __init__(self):
        self.urlManager = UrlManager()
        self.resLoader = ResLoader()
        self.resParser = ResParser()
        self.resOuter = ResOuter()

    def startCraw(self, rootUrl):
        self.urlManager.add(rootUrl)

        if self.urlManager.hasUrl():
            print "有根url"
        else:
            print "没有根url"
        index=1
        imgCount=0;
        while self.urlManager.hasUrl():
            try:
                url = self.urlManager.get()
                print "inex=", index, "url=", url
                data = self.resLoader.load(url)
                urls, imgs = self.resParser.parse(rootUrl, data)
                for img in imgs:
                    imgCount+=1
                    print "img=",img
                self.urlManager.addList(urls)
                self.resOuter.collect(imgs)
            except:
                pass

            if(index==10):break
            index+=1
        print  "imgCount=" ,imgCount
        self.resOuter.save()


# Script entry point: crawl starting from the Baidu front page and download
# every jpg/png image found (the spider caps itself at 10 pages).
if __name__ == "__main__":
    print "开始主方法"  # log: "main started"
    imgSpider = ImgSpider()
    rootUrl = "http://www.baidu.com/"  # seed URL for the crawl
    imgSpider.startCraw(rootUrl)
    print "结束主方法"  # log: "main finished"
