#
# TCSS 422 - Spring 2013
# Paul Ganea, Ralph Feltis, Tarl Hahn
# Team: Something Awesome (We can't remember the exact name)
# 5/2/2013
#

#
# Abstract class that other classes can use to implement a crawler that retrieves and parses webpages
#
import Queue, time, threading, sys, urllib2, time, mutex, re
import robotparser
from Reporter import Reporter
from MyHTMLParser import MyHTMLParser
from Stat_Bean import Stat_Bean
from urlparse import urlparse, urljoin
        
class AbstractCrawler(object):
    """Base class for multi-threaded web crawlers.

    Worker threads pull URLs from ``unprocessedURLs``, fetch the pages
    (honouring robots.txt) via ``RetrieveURL``, and hand open responses to
    ``ParsePage``, which counts keyword occurrences, records per-page stats
    with the shared ``Reporter``, and enqueues newly discovered links.
    All progress/error messages go onto the ``log`` queue.
    """

    # File holding one keyword per line; loaded once at construction time.
    KEYWORD_FILE = "keyword.txt"

    def __init__(self):
        self.unprocessedURLs = Queue.Queue()   # URLs waiting to be fetched
        self.unparsedPages = Queue.Queue()     # open responses waiting to be parsed
        self.log = Queue.Queue()               # human-readable progress/error messages
        self.visitedURLsMutex = threading.Lock()  # guards visitedURLs/pageCount
        self.reporter = Reporter()
        self.visitedURLs = []
        self.keywords = []
        self.starttime = time.clock()  # time the crawler was initialised
        self.readKeywords()
        self.pageCount = 0
        self.maxPages = 100            # crawl budget; see setMaxPages()
        self.isCrawling = False

    def RetrieveURL(self):
        """Fetch one URL from the work queue and enqueue the open response.

        Blocks until a URL is available.  Checks the site's robots.txt
        first; on any robots.txt or fetch failure the URL is logged and
        dropped (best-effort crawling, the thread keeps running).
        """
        url = self.unprocessedURLs.get(True, None)
        if not self.isCrawling:
            return
        threadName = threading.currentThread().getName()
        self.log.put("(" + threadName + ") Retrieving " + str(url))
        # Browser-like headers: some servers reject the default urllib2 UA.
        hdr = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11',
               'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
               'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.3',
               'Accept-Encoding': 'none',
               'Accept-Language': 'en-US,en;q=0.8',
               'Connection': 'keep-alive'}

        # Follow robots.txt rules; be conservative (skip) on any error.
        parsed = urlparse(url)
        rp = robotparser.RobotFileParser()
        rp.set_url(parsed.scheme + "://" + parsed.netloc + "/robots.txt")
        try:
            rp.read()
            retrieveAllowed = rp.can_fetch("*", url)
        except Exception:
            retrieveAllowed = False
            self.log.put("(" + threadName + ") Error when pulling robots.txt for " + str(url))

        if retrieveAllowed:
            # BUG FIX: the Request carrying the custom headers was previously
            # built but never passed to urlopen(), so the headers were unused.
            request = urllib2.Request(url, headers=hdr)
            try:
                response = urllib2.urlopen(request)
                self.unparsedPages.put(response)
            except Exception:
                self.log.put("(" + threadName + ") Could not open " + str(url))
        else:
            self.log.put("(" + threadName + ") Robots.txt prevents crawling of " + str(url))

    def ParsePage(self):
        """Parse one fetched response: count keywords, harvest links, report.

        Blocks until a response is available.  Only text/html and text/plain
        content is parsed; everything else is logged and skipped.  Each
        discovered link is resolved against the page URL and enqueued, and a
        Stat_Bean summarising the page is handed to the reporter.
        """
        response = self.unparsedPages.get(True, None)
        contentType = response.info()["Content-Type"]
        threadName = threading.currentThread().getName()
        if contentType.startswith(("text/html", "text/plain")):
            start_time = time.clock()
            word_count = 0
            parser = MyHTMLParser()
            parser.keywords = self.keywords
            self.log.put("(" + threadName + ") Parsing " + response.url)
            try:
                parser.feed(response.read())
                parser.close()
            except Exception:
                # Best-effort: keep whatever the parser extracted so far.
                self.log.put("(" + threadName + ") Malformed HTML when parsing " + str(response.url))
            keywordSet = set(self.keywords)  # O(1) membership in the loop below
            word_freq = {}  # keyword -> occurrence count on this page
            for line in parser.list:  # all text fragments found on this page
                word_list = re.split("[^\w']+", line.lower())
                word_list = filter(None, word_list)
                word_count += len(word_list)
                for word in word_list:
                    if word in keywordSet:
                        word_freq[word] = word_freq.get(word, 0) + 1
            for url in parser.urls:
                # Resolve relative links against the page's own URL.
                self.enqueueURL(urljoin(response.url, url))

            the_bean = Stat_Bean(response.url, word_freq, time.clock() - start_time, len(parser.urls), word_count)
            self.reporter.addBean(the_bean)
        else:
            self.log.put("(" + threadName + ") Skipping parse for " + response.url + ". Content-Type was " + contentType)

    def enqueueURL(self, url):
        """Validate, deduplicate, and enqueue a URL for crawling.

        Strips fragment anchors, lowercases, defaults the scheme to http,
        rejects non-http URLs and already-visited URLs, and enforces the
        maxPages crawl budget.  Thread-safe via visitedURLsMutex.
        """
        isValidURL = True

        # Strip off anchors; an anchor-only link points to the current page.
        url = url.split("#")[0].lower()
        if url == "":
            isValidURL = False

        # Make sure it is a valid http URL (scheme-less URLs get http://).
        parsed = urlparse(url)
        if parsed.scheme == "":
            url = "http://" + url
            parsed = urlparse(url)
        if parsed.scheme != "http":
            isValidURL = False

        # Make sure we haven't visited this URL (shared state: lock required).
        self.visitedURLsMutex.acquire()
        try:
            if isValidURL and url in self.visitedURLs:
                isValidURL = False
            else:
                self.visitedURLs.append(url)
                self.pageCount = self.pageCount + 1
        finally:
            self.visitedURLsMutex.release()

        if isValidURL and self.pageCount <= self.maxPages:
            self.log.put("(" + threading.currentThread().getName() + ") Enqueueing " + url)
            self.unprocessedURLs.put(url, True, None)

    def readKeywords(self):
        """Load keywords (one non-blank line each) from KEYWORD_FILE.

        Raises IOError if the file is missing — the crawler is useless
        without its keyword list.
        """
        with open(self.KEYWORD_FILE, 'r') as keywordFile:
            for line in keywordFile:
                line = line.rstrip()
                if line != "":
                    self.keywords.append(line)

    def getLog(self):
        """Return the shared log message queue."""
        return self.log

    def getUnprocessedURLs(self):
        """Return the queue of URLs waiting to be fetched."""
        return self.unprocessedURLs

    def getUnparsedPages(self):
        """Return the queue of fetched responses waiting to be parsed."""
        return self.unparsedPages

    def getMaxPages(self):
        """Return the maximum number of pages this crawler will enqueue."""
        return self.maxPages

    def setMaxPages(self, newMax):
        """Set the maximum number of pages this crawler will enqueue."""
        self.maxPages = newMax

    def StartCrawling(self):
        """Mark the crawler active, reset the page budget, and stamp the reporter."""
        self.isCrawling = True
        self.pageCount = 0
        self.reporter.startTime = time.clock()

    def StopCrawling(self):
        """Mark the crawler inactive and discard all queued work.

        BUG FIX: Queue.empty() only *reports* whether a queue is empty — the
        original calls discarded nothing.  Drain both queues explicitly.
        """
        self.isCrawling = False
        for pending in (self.unprocessedURLs, self.unparsedPages):
            try:
                while True:
                    pending.get_nowait()
            except Queue.Empty:
                pass