'''
Multi-threaded producer/consumer web robot: crawls an e-book site for
torrent download links and saves the torrent files to the local disk.

@author: varounisdi
'''
import re, threading,urllib2,socket
from time import ctime,sleep
from Queue import Queue, Empty


class Retriever(object):
    def __init__(self,sitename):
        self.sitename=sitename
        
    def getHTMLlinksfromURL(self,label,url):
        urlist=[]
        pageHTML=self.getHTMLPage(url)
        for line in pageHTML[:]:
            match = re.search('<a href="(.*)".*>.*'+label+'.*</a>', line)
            if match is not None:
                hrefurl=match.group(1)
                hrefurl='http://'+self.sitename+hrefurl
                urlist.append(hrefurl)
    
        return urlist

    def getHTMLPage(self,url):
        try:
            pagefile = urllib2.urlopen(url, timeout=15)
            
        except (urllib2.HTTPError,urllib2.URLError):
            raise
            return None
        else:
            pageHTML = pagefile.readlines()
        return pageHTML


class MyThread(threading.Thread):
    def __init__(self, func, name=''):
        threading.Thread.__init__(self)
        self.name = name
        self.func = func


    def getResult(self):
        return self.res

    def run(self):
        print 'starting', self.name, 'at:', ctime()
        ##self.res = apply(self.func, self.args)
        self.res = self.func()
        print self.name, 'finished at:', ctime()



class WebSiteRobot():
    stopflag=False
    startflag=False
    def __init__(self,sitename,queue):
        self.sitename=sitename
        self.torrents=[]
        self.queue=queue

    def getBookTorrents(self,pageurl):
        
        ret=Retriever(self.sitename)
        bookslist=ret.getHTMLlinksfromURL('Read More',pageurl)
                    
        for bookurl in bookslist:
            if self.__class__.stopflag==False:
                torrentlink=ret.getHTMLlinksfromURL('Download This Torrent',bookurl)
                print 'Produce Link: ', torrentlink
                self.queue.put(torrentlink,1)
                if self.__class__.startflag==False:self.__class__.startflag=True
            else:
                break
    
        

    def produce(self):
        siteurl='http://'+self.sitename
        Pgs=[siteurl+'/all-'+str(i)+'.html' for i in range(2,3)]
        Pgs.insert(0, 'http://'+self.sitename)
        for page in Pgs:
            print 'Page: ', page
            try:
                self.getBookTorrents(page)
            except (urllib2.HTTPError,urllib2.URLError), X:
                if isinstance(X.reason, socket.timeout):
                    print 'Got timeout while opening a page...'
                else:
                    print 'Error opening a page...'
                print X.reason
                self.__class__.startflag=True
                break
        
    def consume(self):
        #TODO How to stop this when queue == Empty???
        while self.__class__.startflag==False:
            sleep(1)
        while True:
            try:
                i=self.queue.get(timeout=5)
                print 'Consume : ' + i[0]
                f=urllib2.urlopen(i[0])
                cdisptxt=f.info()['Content-Disposition']
                pattobj  = re.compile('attachment; filename="(.*)"')
                print pattobj.match(cdisptxt).group(1)
                localfile=open('C:\\'+pattobj.match(cdisptxt).group(1),'w')
                localfile.write(f.read())
                if i[0]=='http://'+self.sitename+'/download.php?id=12198':
                    print 'stop the train!!!'
                    self.__class__.stopflag=True
            except Empty:
                print 'URL Queue is empty!'
                print 'Exiting...'
                break                  


def main():
    """Spin up the producer and consumer threads and wait for both."""
    sitename = 'www.ebooks.net'
    link_queue = Queue(50)
    robot = WebSiteRobot(sitename, link_queue)

    workers = [MyThread(fn, fn.__name__)
               for fn in (robot.produce, robot.consume)]

    for worker in workers:
        worker.start()

    # Block until both threads have finished.
    for worker in workers:
        worker.join()

    
        
# Run the robot only when executed as a script, not when imported.
if __name__ == '__main__':
    main()