#!/usr/bin/env python
# coding=utf-8

# URL
# NOTE(review): module-level default album URL. It appears unused — main()
# receives its url from sys.argv or raw_input in the __main__ block, and the
# parameter shadows this name. Confirm before removing.
url = 'http://s241.photobucket.com'

import urllib2, HTMLParser
from BeautifulSoup import BeautifulSoup
import os, sys

class MyHTMLParser():
    """Scrape one photobucket HTML page with BeautifulSoup.

    Two modes, selected by the self.download flag before calling feed():
      0 -- album page: collect photo-page links and the next-page URL
      1 -- photo page: extract the full-size image URL
    """
    def __init__(self):
        """init"""
        # URL of the next album page; stays "" on the last page
        self.go_next = ""
        # photo-page links collected from an album page (mode 0)
        self.links = []
        # the target full-size image link found on a photo page (mode 1)
        self.link = ""
        # 0 = parse an album page, 1 = parse a photo page
        self.download = 0

    def feed(self, data):
        """Parse the HTML in *data* and fill the attributes above."""
        soup = BeautifulSoup(data)
        # album-page step: find photo links and the "next page" link
        if not self.download:
            # <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
            # --------------------------------------
            # find the next page link
            # the pager is the div whose id is "paginator"; the last link
            # inside its second child is the "next" control
            thing_div = soup('div', attrs={"id":"paginator"})[0]
            # NOTE(review): contents[1] / contents[-1] rely on the exact
            # markup of the pager (whitespace nodes count as children in
            # BeautifulSoup 3) — fragile if photobucket changes its HTML
            i = thing_div.contents[1]
            i = i.contents[-1]                          # the last link; may be a span tag, not an <a>
            # on the last album page the control carries class="disabled"
            if i.has_key('class') and i['class'] == 'disabled':
                print "That's the last page"
            else:
                self.go_next = i['href']
            # >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>

            # <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
            # --------------------------------------
            # find the photo-page links
            # each thumbnail is a <span class="thumb"> wrapping an <a>
            things = soup('span', attrs={"class":"thumb"})
            for i in things:
                if i.a:
                    self.links.append(i.a['href'])
            # >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>

        # photo-page step: the full-size image is the <img> with id="image"
        elif self.download:
            # <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
            # --------------------------------------
            # find the img tag whose id is "image"
            thing = soup('img', attrs={"id":"image"})[0]
            # get the src value from the img tag
            self.link = thing['src']
            # >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>


class main:
    """main class"""
    def __init__(self, url, de_bug=False):
        # debug, if true will not really download the pictures
        self.de_bug = de_bug
        # last processing url
        self.page_url = ""
        self.link_list = []
        self.img_list = []
        # the url of next page
        self.next_page = ""
        # total images that downloaded
        self.total = 0
        print "Program Start"
        url = url.replace(' ', '%20')
        self.getPage(url)
        print "Search all pictures' links Finished!!"
        print "All links was found: \n", self.link_list
        print "Total :", self.total
        raw_input("Prease Enter to Continues")
        self.getPic(self.link_list)

    def getPic(self, link_list):
        """get the Pictures from Flickr"""
        nn = 0;
        for i in link_list:
            for j in i:
                print "[%d of total %d]   ", % (nn++, self.total)
                data = self.goLink(j)
                if (data):
                    # go to the final step, parse the data find the image's url
                    parser = MyHTMLParser()
                    parser.download = 1
                    parser.feed(data)
                    # get the image url
                    self.img_list.append(parser.link)
                    try:
                        self.downLink(parser.link)
                    except urllib2.URLError, e:
                        print e, 
                        print "   ...Retry once"
                        self.downLink(parser.link)

    def getPage(self, url):
        """get the HTML Page"""
        # get the data
        data = self.goLink(url)
        if ( data ):
            # creat the HTML Parser
            parser = MyHTMLParser()
            # get the information
            parser.feed(data)
            # get the search result
            self.link_list.append(parser.links)
            self.total += len(parser.links)
            while (parser.go_next):
                print "Next: %s" % parser.go_next
                data = self.goLink(parser.go_next)
                if data:
                    parser = MyHTMLParser()
                    parser.feed(data)
                    #print parser.result()
                    self.link_list.append(parser.links)
                    self.total += len(parser.links)

    def downLink(self, input_url):
        """down a file from url"""
        # check the url
        if input_url.startswith('http'):
            # this is an absolute url
            url = input_url
        elif input_url.startswith('/'):
            # it is a relative url, but relate to the root
            a = self.page_url.split('/')
            url = a[0] + '//' + a[2] + input_url
        else:
            # it is a relative url also, but relate to "the current page"
            a = self.page_url.rfind('/') + 1
            url = self.page_url[0:a] + input_url
        print "Download this: ", url
        if not self.de_bug and not os.path.exists(file_name):
            request = urllib2.Request(url, None,
                    {'User-agent': 'Mozilla/5.0 (X11; U; Linux i686; zh-TW; rv:1.8.1.11) Gecko/20071127 Firefox/2.0.0.11)',
                     'Referer': self.page_url})
            img = urllib2.urlopen(request)
            file_name = img.geturl().split('/')[-1]
            file = open(file_name, "wb")
            print >> file, img.read()
            file.close()
            img.close()

    def goLink(self, input_url):
        """open a url, and return the data"""
        # check the url
        if ( input_url.startswith('http') ):
            url = input_url
        elif ( input_url.startswith('/') ):
            a = self.page_url.split('/')
            url = a[0] + '//' + a[2] + input_url
        else:
            a = self.page_url.rfind('/') + 1
            url = self.page_url[0:a] + input_url
        print "Open URL: %s" % url
        print "Loading........."
        # creat a request
        request = urllib2.Request(url, None, {'User-agent': 'Mozilla/5.0 (X11; U; Linux i686; zh-TW; rv:1.8.1.11) Gecko/20071127 Firefox/2.0.0.11)'})
        # open the url
        html_p = urllib2.urlopen(request)
        # get the html page data from http server
        data = html_p.read()
        html_p.close()
        # return the data
        print "download page successful"
        self.page_url = html_p.geturl()
        return data


if __name__ == "__main__":
    if (len(sys.argv) == 2):
        main(sys.argv[1])
    else:
        print "DeBug mode"
        url = raw_input("Please input the url: ")
        #main(url, de_bug=True)
        main(url, de_bug=False)
