#!/usr/bin/env python
# coding=utf-8

import HTMLParser
import os
import sys
import urllib2
import urlparse

from BeautifulSoup import BeautifulSoup

class MyHTMLParser():
    """Scrape photo links from a Flickr HTML page using BeautifulSoup.

    The parser works in two modes, selected by the ``download`` flag:

    * ``download == 0`` (default) -- search-result mode: collect every
      photo-page link into ``self.links`` and the URL of the next result
      page (if any) into ``self.go_next``.
    * ``download == 1`` -- photo-page mode: extract the image's ``src``
      URL into ``self.link``.
    """

    def __init__(self):
        """Initialize an empty parse result."""
        # URL of the next result page, or "" when there is none
        self.go_next = ""
        # photo-page links collected from a search-result page
        self.links = []
        # the target image link found on a single photo page
        self.link = ""
        # mode flag: 0 = parse a search-result page, 1 = parse a photo page
        self.download = 0

    def feed(self, data):
        """Parse one HTML page (a string) according to the current mode."""
        soup = BeautifulSoup(data)
        if not self.download:
            # The "next page" link has class pageNext and sits inside a div
            # of class pageSelector.  The page carries two identical
            # selector divs, so only the first one is needed -- but guard
            # against it being absent entirely (original code raised
            # IndexError in that case).
            selectors = soup('div', attrs={"class": "pageSelector"})
            if selectors:
                for anchor in selectors[0]('a'):
                    # BUG FIX: anchor['class'] raises KeyError for links
                    # without a class attribute in BeautifulSoup 3;
                    # .get() returns None instead.
                    if anchor.get('class') == 'pageNext':
                        self.go_next = anchor['href']
            # Photo-page links live inside divs of class thumbImg
            # (under the thumbBox table).
            for thumb in soup('div', attrs={"class": "thumbImg"}):
                if thumb.a:
                    self.links.append(thumb.a['href'])
        else:
            # Photo page: the image tag sits inside the div whose id is
            # imageFrame; grab its src attribute.
            frame = soup('div', attrs={"id": "imageFrame"})[0]
            self.link = frame.img['src']


class main:
    """main class"""
    def __init__(self, url):
        # last processing url
        self.page_url = ""
        self.link_list = []
        self.img_list = []
        # downloading which photo
        self.downloading = 0
        # total images that downloaded
        self.total = 0
        print "Program Start"
        self.getPage(url)
        print "Search all pictures' links Finished!!"
        self.getPic(self.link_list)

    def getPic(self, link_list):
        """get the Pictures from Flickr"""
        for i in link_list:
            for j in i:
                data = self.goLink(j)
                if (data):
                    # go to the final step, parse the data find the image's url
                    parser = MyHTMLParser()
                    parser.download = 1
                    parser.feed(data)
                    # get the image url
                    self.img_list.append(parser.link)
                    # download the image
                    self.downloading += 1
                    print "downloading No. %s of total %s photos" % (self.downloading, self.total)
                    self.downLink(parser.link)

    def getPage(self, url):
        """get the Flickr HTML Page"""
        # get the data
        data = self.goLink(url)
        if ( data ):
            # creat the HTML Parser
            parser = MyHTMLParser()
            # get the information
            parser.feed(data)
            # get the search result
            self.link_list.append(parser.links)
            self.total += len(parser.links)
            while (parser.go_next):
                print "Next: %s" % parser.go_next
                data = self.goLink(parser.go_next)
                if data:
                    parser = MyHTMLParser()
                    parser.feed(data)
                    #print parser.result()
                    self.link_list.append(parser.links)
                    self.total += len(parser.links)
        print "found total %s photos" % self.total

    def downLink(self, input_url):
        """down a file from url"""
        # check the url
        if input_url.startswith('http'):
            # this is an absolute url
            url = input_url
        elif input_url.startswith('/'):
            # it is a relative url, but relate to the root
            a = self.page_url.split('/')
            url = a[0] + '//' + a[2] + input_url
        else:
            # it is a relative url also, but relate to "this page"
            a = self.page_url.rfind('/') + 1
            url = self.page_url[0:a] + input_url
        print "Download this: ", url
        request = urllib2.Request(url, None,
                {'User-agent': 'Mozilla/5.0 (X11; U; Linux i686; zh-TW; rv:1.8.1.11) Gecko/20071127 Firefox/2.0.0.11)',
                 'Referer': self.page_url})
        img = urllib2.urlopen(request)
        file_name = img.geturl().split('/')[-1]
        file = open(file_name, "wb")
        print >> file, img.read()
        file.close()
        img.close()

    def goLink(self, input_url):
        """open a url, and return the data"""
        # check the url
        if ( input_url.startswith('http') ):
            url = input_url
        elif ( input_url.startswith('/') ):
            a = self.page_url.split('/')
            url = a[0] + '//' + a[2] + input_url
        else:
            a = self.page_url.rfind('/') + 1
            url = self.page_url[0:a] + input_url
        print "Open URL: %s" % url
        print "Loading........."
        # creat a request
        request = urllib2.Request(url, None, {'User-agent': 'Mozilla/5.0 (X11; U; Linux i686; zh-TW; rv:1.8.1.11) Gecko/20071127 Firefox/2.0.0.11)'})
        # open the url
        html_p = urllib2.urlopen(request)
        # get the html page data from http server
        data = html_p.read()
        html_p.close()
        # return the data
        print "download page successful"
        self.page_url = html_p.geturl()
        return data


if __name__ == "__main__":
    # Take the target URL from the command line when given,
    # otherwise prompt for it interactively.
    if len(sys.argv) == 2:
        target = sys.argv[1]
    else:
        target = raw_input(":")
    main(target)
