'''
Created on 2012-1-30

@author: xiaokan
'''
from xiaokan.Crawler import Crawler
from xiaokan.GeneralUtils import GoogleHelper, GoogleResultsParser, SerializationUtils
import socket
import time

# Number of top Google result links used to seed the crawl queue.
TOP_NUMBER = 20

# Maximum number of pages the crawler will visit.
N = 1000

# Maximum number of Google result pages to fetch while collecting seed links.
MAX_PAGES = 5

# Global socket timeout applied via socket.setdefaulttimeout().
TIME_OUT = 5 #seconds

# When True, any previously serialized crawler state is ignored and the
# crawl restarts from scratch; when False, a saved crawler (if one exists)
# is resumed instead.
# If you change the keywords, set this to True so stale state is discarded.
ALWAYS_IGNORE_RESULTS_BEFORE = False




def main():
    
    #count start time
    start_time = time.time()    
    #Set globally socket timeout parameter
    socket.setdefaulttimeout(TIME_OUT)
    
    #first crawling google results
    keywords = "Central+South+University"
    google_helper = GoogleHelper()
    google_parser = GoogleResultsParser()
    data = google_helper.getHTMLResults(keywords)
    serl_util = SerializationUtils()
#    file_handler = open("d:/11.html","r")
#    data = file_handler.read()
    crawler = serl_util.is_crawled_before()
    
    if ALWAYS_IGNORE_RESULTS_BEFORE or crawler == None:   #Self-explanatory

        '''
        Start crawling and parsing google results
        '''
        google_parser.feed(data)
        links = google_parser.get_results(TOP_NUMBER)
        count = 1
        
        while len(links) < TOP_NUMBER:     #if the first page of google results isn't enough, get more pages and then parse
            if count > MAX_PAGES: 
                break
            data = google_helper.getHTMLResults(keywords, count)   #get next page of Google Search Result
            google_parser.reset()
            google_parser.feed(data)
            temp_links = google_parser.get_results(TOP_NUMBER - len(links))
            links.extend(temp_links)
            count += 1
        
        '''
        Start crawling from the links in google results
        '''
        
        crawler = Crawler()
        crawler.set_max_number_of_pages(N)
        crawler.feed_starter_addrs(links)
        
    else:
        #Crawler has already been loaded from file, run from data before
        pass
        
    crawler.crawl()
    
    end_time = time.time()
    
    running_time = (end_time - start_time) / 60
    print "System done crawling " + str(crawler.count) + " pages(" + str(crawler.queue.total_bytes) + " bytes) in " + str(running_time) + " minutes"


# Script entry point: run the crawl only when executed directly, not on import.
if __name__ == '__main__':
    main()
