#
# TCSS 422 - Spring 2013
# Paul Ganea, Ralph Feltis, Tarl Hahn
# Team: Something Awesome (We can't remember the exact name)
# 5/2/2013
#

#
# Multi threaded implementation of a web crawler
#
import threading
from AbstractCrawler import AbstractCrawler
        
class MultiThreadedCrawler(AbstractCrawler):
    """Multi-threaded web crawler.

    Runs the URL-retrieval and page-parsing work inherited from
    AbstractCrawler on pools of daemon threads: each retriever thread
    loops RetrieveURL() forever, each parser thread loops ParsePage()
    forever. Because the threads are daemons they die with the process
    rather than keeping it alive.
    """

    def __init__(self, numberRetrieverThreads, numberParserThreads):
        """Create a crawler with the given worker-pool sizes.

        numberRetrieverThreads -- number of threads running RetrieveURL
        numberParserThreads    -- number of threads running ParsePage
        """
        super(MultiThreadedCrawler, self).__init__()

        self.threads = []  # every worker thread started by Crawl()
        self.numberRetrieverThreads = numberRetrieverThreads
        self.numberParserThreads = numberParserThreads

    def Crawl(self, seedURL):
        """Start the worker pools, then seed the crawl with seedURL."""
        super(MultiThreadedCrawler, self).StartCrawling()

        # Spin up both pools before enqueueing the seed so a retriever
        # is already waiting when the first URL arrives.
        self._spawnWorkers(self.numberRetrieverThreads,
                           self.LoopingRetrieveURL, "Retriever")
        self._spawnWorkers(self.numberParserThreads,
                           self.LoopingParsePage, "Parser")

        super(MultiThreadedCrawler, self).enqueueURL(seedURL)

    def _spawnWorkers(self, count, target, namePrefix):
        """Start `count` daemon threads running `target` and track them.

        Threads are named namePrefix + index (e.g. "Retriever0") and
        appended to self.threads. Factored out of Crawl(), which
        previously duplicated this loop for each pool.
        """
        for i in range(count):
            # Pass the bound method directly; wrapping it in a lambda
            # (as the original did) added nothing.
            t = threading.Thread(target=target, name=namePrefix + str(i))
            t.daemon = True  # don't block interpreter shutdown
            # t.name replaces the deprecated t.getName() accessor.
            super(MultiThreadedCrawler, self).getLog().put(
                "Spinning up " + t.name, True, None)
            t.start()
            self.threads.append(t)

    def LoopingRetrieveURL(self):
        """Worker loop: retrieve URLs forever (exits only with the process)."""
        while True:
            super(MultiThreadedCrawler, self).RetrieveURL()

    def LoopingParsePage(self):
        """Worker loop: parse pages forever (exits only with the process)."""
        while True:
            super(MultiThreadedCrawler, self).ParsePage()