print "hello world!"

"""IMGUR downloader with Tkinter GUI
Made by: Wouter Visee, woutervisee@gmail.com
Business model: freemium, shareware, freeware, FOSS freeware, paid?


Features to be added: download all images by 1 reddit user
keep a list of albums to watch, and automatically download

Enter the url, the download, folder, and press 'download'
Settings should be stored as .ini in the same folder as the .py
Progess should be visible in a text-widget
:"""

import Tkinter, sys, os, re, time, urllib, threading, subprocess, datetime, Queue, tkFileDialog
import xml.dom.minidom as minidom
from Tkinter import *

print "Running the GUI-imgur-downloader-program"

class MyApp:
    def __init__(self, parent):
        self.myParent = parent
        self.mainwindow = Frame(parent)
        self.mainwindow.pack()

        self.button1 = Button(self.mainwindow, command=self.dlbuttonclick)
        self.button1["text"]= "Click here to download!"
        self.button1.pack()
        
        self.button2 = Button(self.mainwindow, command=self.folderbuttonclick)
        self.button2["text"]="Choose download folder.."
        self.button2.pack()

        self.text1 = Text(self.mainwindow)
        self.text1.insert(END, "Here goes the URL of the gallery that you want to download!")
        self.text1.pack()
          
    def dlbuttonclick(self):
        print "the download-button has been clicked."
        global dlurl
        dlurl = self.text1.get('1.0', 'end')
        print "The input given is %s" % dlurl
        main()
        
    def folderbuttonclick(self):
        print "the folder-button has been clicked"
        global dirname
        dirname = tkFileDialog.askdirectory(parent=root,initialdir="/",title='Pick a directory')



class ImgurAlbum(object):
    """Model object for an imgur album: a display name plus image URLs."""

    def __init__(self, name, imageList):
        # An empty album name would map to an unusable directory; ask for one.
        if name == '':
            self.name = raw_input("Please enter the name for this album")
        else:
            self.name = name
        self.imageList = imageList

    def enqueueImages(self, queue):
        """Add every image URL to *queue* for the DownloadThread workers.

        Creates the target directory 'imgur/<name-without-spaces>/' up
        front, before any worker thread needs it.
        """
        # 'target' instead of 'dir': never shadow the builtin dir().
        target = os.path.dirname('imgur/' + self.name.replace(' ', '') + '/')
        if not os.path.exists(target):
            os.makedirs(target)

        for image in self.imageList:
            queue.put(image)
 
class DownloadThread(threading.Thread):
    """Worker thread: pops image URLs off the queue and saves them to disk."""

    def __init__(self, albumName, queue):
        threading.Thread.__init__(self)
        self.queue = queue
        self.albumName = albumName

    def run(self):
        # Daemon worker loop: runs until the process exits.
        while True:
            image = self.queue.get()
            origName = image[image.rfind('/') + 1:]
            fileName = 'imgur/' + self.albumName.replace(' ', '') + '/' + origName

            if not os.path.exists(fileName):
                try:
                    # Fetch FIRST, open the file second: the old order opened
                    # the file before urlopen(), so a network failure leaked
                    # the handle and left an empty file on disk.
                    imgData = urllib.urlopen(image).read()
                    output = open(fileName, 'wb')
                    try:
                        output.write(imgData)
                    finally:
                        output.close()

                    # display a nice progress dot (without print's newline)
                    sys.stdout.write('.')
                    sys.stdout.flush()
                except (IOError, OSError):
                    # Narrowed from a bare except: urllib and file I/O raise
                    # IOError/OSError; anything else is a real bug and should
                    # not be silently swallowed.
                    print "File read error!"
            else:  # File already exists; do not overwrite
                print "File %s exists!" % fileName

            # signal the dispatch queue that this task is complete
            self.queue.task_done()
 
class PageParser(object):
    """ Imgur gallery page parser """
 
    def __init__(self, url):
        self.url = url
        self.imageList = []
 
    def parse(self):
        self._parse(self.url)
        return ImgurAlbum(self.imageList[0], self.imageList[1:])
 
    def _parse(self, url):
        print "The URL is %s" % url
        page = urllib.urlopen(url).read()
       
 
        if page.find('subdomain_css') != -1:
            links = self._parseSubdomain(url)
 
            for linkURL in links:
                test = self._parse(linkURL)
 
        elif page.find('album_css') != -1:
            self.imageList.extend(self._parseAlbum(url))
 
        elif page.find('gallery_css') != -1:
            self.imageList.extend(self._parseGallery(url))
 
    def _parseSubdomain(self, url):
        page = urllib.urlopen(url).read()
        links = []
        last = 0
 
        tag = '"cover"'
 
        while 1:
 
            last = page.find(tag, last)
 
            if last == -1:
                break
 
            links.append( "http:"+page[page.find('href=', last)+6:page.find('">', last+9)]+"/all" )
 
            last = last + 9
 
        return links
 
    def _parseAlbum(self, url):
        albumimages = []
        page = urllib.urlopen(url).read()
 
        null=False
 
        titleStart = page.find("data-title")+12
        albumimages.append(page[titleStart:page.find('"',titleStart)])
        # print "parsing album"
 
        start = page.find("images:", page.find("ImgurAlbum"))+8
        rawAlbumdata = page[start: page.find("]}", start)+2]
 
        albumdata = eval(rawAlbumdata)
 
        for i in albumdata["items"]:
            albumimages.append( "http://i.imgur.com/"+i["hash"]+i["ext"] )
 
        return albumimages
 
    def _parseGallery(self, url):
        gallery = urllib.urlopen(url).read()
        maxpage = gallery.find("maxPage:")
        pagecount = gallery[maxpage+8:gallery.find(",", maxpage)].replace(' ','')
        baseUrl = gallery.find("baseURL:")
        url = "http://www.imgur.com"+gallery[baseUrl+8:gallery.find(",", baseUrl)].replace(' ','').replace("'",'')
        galleryname = gallery[baseUrl+8:gallery.find(",", baseUrl)].replace(' ','').replace('/','').replace("'",'')
        galleryimages = [galleryname]
 
        for page in range(eval(pagecount)):
            if url[-1:] == "/":
                xmlurl = url + "hot/page/"+str(page)+".xml"
            else:
                xmlurl = url + "/hot/page/"+str(page)+".xml"
 
            xml = urllib.urlopen(xmlurl).read()
 
            print "Page %s" % page
 
            last = 0
 
            xml.count("/hash")
 
            while 1:
                hash = xml.find("<hash>", last)
 
                if hash == -1:
                    break
 
                link =  xml[ hash+6: xml.find("</", hash) ] 
 
                extPos = xml.find("<ext>", hash)
                ext = xml[ extPos+5 : xml.find("</", extPos) ] 
 
                galleryimages.append( "http://i.imgur.com/"+link+ext )
 
                last = hash+1
 
        return galleryimages
 
def numberOfCPUs():
    """Worker-pool size; a fixed pool of 4 is plenty for imgur downloads
    (no need to match the actual CPU count for I/O-bound work)."""
    return 4
 
def main():
    """ Core downloader function """
 
    # Dispatch queue
    queue = Queue.Queue() 
 
    # Get user input
    url = dlurl
 
    # Parse the imgur gallery/album/subdomain page
    # into an ImgurAlbum object
    p = PageParser(url)
    album = p.parse()
 
    # Scale the number of worker threads to the
    # the smaller of (number of images in album, number of CPUs)
    threads = min(len(album.imageList), numberOfCPUs())
 
    start = time.time()
    print "Fetching '%s' (%d images)" % (album.name, len(album.imageList))
    
    print "Downloading with %d threads..." % threads
 
    # Spin up the desired number of worker threads
    for i in range(threads):
        dt = DownloadThread(album.name, queue)
        dt.setDaemon(True)
        dt.start()
 
    # Pour the images into the dispatch queue
    # to start our work...
    album.enqueueImages(queue)
 
    # block until queue is empty
    queue.join()
    print "\n"
    print "DONE! Elapsed time: %.2f seconds" % (time.time() - start)
 

if __name__ == "__main__":
    # Guard so that importing this module does not launch the GUI; running it
    # as a script behaves exactly as before. 'root' stays module-global, which
    # MyApp.folderbuttonclick relies on. mainloop() blocks until the window
    # is closed.
    root = Tk()
    myapp = MyApp(root)
    root.mainloop()