'''
Created on 2012-2-1

@author: xiaokan
'''
from xiaokan.GeneralUtils import HyperlinkParser
from xiaokan.Pojo import Page
from xiaokan.RobotsUtils import RobotsHelper
import hashlib
import time

class CrawlingQueue(object):
    '''
    FIFO queue driving a Breadth-First-Search crawl.  Nodes are Page
    objects carrying the url, crawl depth, extracted links and raw data.
    '''

    # Directory where fetched pages are written (filename = md5 of the url).
    PAGE_SAVING_DIRECTORY = 'd:/google_pages/'

    # Seconds the consumer thread sleeps when dequeue() finds the queue empty.
    MAIN_THREAD_INTERVAL = 0.2

    def __init__(self):
        self._queue = []        # BFS queue of Page nodes
        self._visited = {}      # url -> True; urls already enqueued
        self.visted_urls = []   # [url, length, depth] triples for statistics
                                # (name keeps the original 'visted' typo so
                                # existing callers keep working)
        self.link_parser = HyperlinkParser()
        self.total_bytes = 0    # total bytes of page data accumulated
        self.count = 0          # number of pages processed so far

        self.page_crawled = {}  # md5(page contents) -> True.  Two different
                                # urls whose contents share an md5 digest are
                                # treated as the same page and crawled once.

    def get_queue_length(self):
        """Return the number of nodes currently waiting in the queue."""
        return len(self._queue)

    def enqueue(self, url, _depth=0):
        """Fetch *url* and append a Page node for it to the BFS queue.

        Returns True when the page was fetched, saved to disk and
        enqueued; False when robots.txt forbids the url, the url (or a
        page with identical content) was seen before, or the fetch failed.
        """
        node = Page()
        node.addr = url
        node.depth = _depth

        # Respect robots.txt before doing any network work.
        if not RobotsHelper().is_url_allowed(node.addr):
            return False

        if self.checkVisited(node):
            return False                    # url already enqueued earlier

        parser = self.link_parser
        parser.reset()
        print("Start")
        feed_res = parser.feed_url(node.addr)
        print("End")
        if feed_res == False:
            return False                    # download/parse failed

        # Deduplicate by content checksum: if this body was already seen
        # under another url, skip it; otherwise record it and continue.
        digest = hashlib.md5(parser.get_data()).hexdigest()
        if digest in self.page_crawled:     # 'in' replaces deprecated has_key()
            return False
        self.page_crawled[digest] = True

        node.links = parser.get_links()
        node.data = parser.get_data()
        node.length = len(node.data)

        self.total_bytes += node.length
        self._queue.append(node)
        self._visited[node.addr] = True
        self.visted_urls.append([node.addr, node.length, node.depth])
        self.save_page2disk(node)           # persist the page to disk

        self.count += 1
        print("No. %d" % self.count)
        return True

    def dequeue(self):
        """Pop and return the head node; return None (after a short
        sleep, so the caller's polling loop does not spin) when empty.

        Bug fix: the previous code did ``self._queue[:0] = []`` -- an
        assignment to the empty slice [0:0], which removes nothing -- so
        the head node was returned again and again forever.
        """
        if not self._queue:
            time.sleep(self.MAIN_THREAD_INTERVAL)   # pause the consumer
            return None
        return self._queue.pop(0)

    def getHead(self):
        """Return the head node without removing it, or None if empty."""
        return self._queue[0] if self._queue else None

    def getTail(self):
        """Return the last node in the queue, or None if empty."""
        return self._queue[-1] if self._queue else None

    def checkVisited(self, node):
        """Return True when node.addr has already been enqueued."""
        return node.addr in self._visited

    def save_page2disk(self, page):
        """Write page.data to PAGE_SAVING_DIRECTORY, with the md5 hex
        digest of the url as the file name.

        NOTE(review): assumes page.addr and page.data are byte strings
        (Python 2).  Under Python 3, md5() and text-mode 'w' would need
        explicit encoding -- confirm before porting.
        """
        name = hashlib.md5(page.addr).hexdigest() + ".html"
        # 'with' guarantees the handle is closed even if write() raises
        # (the old code leaked the handle on error and flushed manually).
        with open(self.PAGE_SAVING_DIRECTORY + name, 'w') as fout:
            fout.write(page.data)
        
