# Crawler for CS6913
# Written in Python 3.1
# Author: Liang Xinzhao

# Configuration variables and constants for the crawler.


# Site-category codes used by the crawler.
GOOGLE = 1000
NORMAL_SITE = 1010

# Number of worker threads.  Must be > 0; the author recommends < 10.
THREAD_NUM = 9

# Per-request timeout (presumably seconds -- confirm against the
# download call that consumes it).
CRAWL_TIMEOUT = 999

# Re-visit window: a page downloaded within the last
# EARLY_VISITED_BEFORE seconds counts as "recently visited" and is
# skipped; anything older is downloaded again.  86399 s is one second
# short of 24 hours.
EARLY_VISITED_BEFORE = 86399

# Maximum number of pages to crawl in one run.
CRAWL_AMOUNT_LIMIT = 20

# Write each successfully crawled page to disk?
FLAG_OUTPUT2DISK = True
# Print a message on each successful download?
FLAG_PRINT_SUC = True
# Per-site error threshold: once a site has produced this many errors,
# its links are no longer added to the crawl queue.
# NOTE(review): despite the "MIN_" prefix this behaves as a maximum.
MIN_ERRORS_ALLOWED_FOR_A_SITE = 50
# A URL containing more than this many slashes is abandoned as malformed.
# NOTE(review): "WIERED" is a typo for "WEIRD" -- name kept because
# other modules may reference it.
WIERED_SLASHES = 10

# Parser behaviour: which attributes yield new links to enqueue.
FLAG_PARSE_HREF = True  # follow href="..." attributes
FLAG_PARSE_SRC = True   # follow src="..." attributes

# Numeric codes classifying a downloaded resource's content type.
TYPE_HTML = 2000
TYPE_XML = 2001
TYPE_JPG = 3002
TYPE_JPEG = 3003
TYPE_GIF = 3004
TYPE_NONE = 1999  # unknown / unclassified resource

# Content types the crawler is willing to download.
# For a markup-only crawl, use instead:
#   TYPE_WANTED = [TYPE_HTML, TYPE_XML]
TYPE_WANTED = [TYPE_HTML, TYPE_XML, TYPE_JPG, TYPE_JPEG]

# Type code -> MIME string (presumably matched against the
# Content-Type response header -- confirm against the consumer).
# NOTE(review): "image/jpg" is not a standard MIME type (the standard
# form is "image/jpeg"); it may be deliberate, to match servers that
# send the non-standard string -- confirm before changing.
TYPE_DICT = {
    TYPE_HTML: "text/html",
    TYPE_XML: "text/xml",
    TYPE_JPG: "image/jpg",
    TYPE_JPEG: "image/jpeg",
    TYPE_GIF: "image/gif",
}

# Type code -> file extension used when saving the page to disk.
EXT_DICT = {
    TYPE_HTML: ".html",
    TYPE_XML: ".xml",
    TYPE_JPG: ".jpg",
    TYPE_JPEG: ".jpeg",
    TYPE_GIF: ".gif",
}

# Extension used when the content type cannot be determined.
DEFAULT_FILE_EXT = '.html'

# Filesystem layout, anchored at the process's current working directory.
import os
WORKING_DIRECTORY = os.path.abspath(os.curdir)

# Downloaded pages are written here (when FLAG_OUTPUT2DISK is set).
PAGE_STORE_DIRECTORY = WORKING_DIRECTORY + "/pages/"

# All crawl logs live under this directory.
LOG_DIRECTORY = WORKING_DIRECTORY + "/log/"

# Log of pages that produced errors.
LOG_OF_ERRORS_PATH = LOG_DIRECTORY + "error_page.log"

# Logs of crawled URLs and of crawled-content records.
LOG_OF_CRAWLED_URL = LOG_DIRECTORY + "record_url.log"
LOG_OF_CRAWLED_CONTENT = LOG_DIRECTORY + "record_content.log"
# User-agent strings the crawler can present to servers.
agents = {
    "Mozilla_Browser": "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:9.0.1) Gecko/20100101 Firefox/9.0.1",
    "Google": "Googlebot",
    "baidu": "Baiduspider",
    "yahoo": "Yahoo!slurp",
}

# Default user-agent: impersonate a desktop Firefox browser.
# (The original comment said "agents[0]", but agents is a dict keyed
# by name, not a list.)
CRAWL_USERAGENT = agents["Mozilla_Browser"]

# Error codes returned by crawler components (all negative).
# NOTE(review): the "DECTECTED" and "FOUNDED" misspellings in the
# constant names are kept unchanged -- other modules may reference them.
ERROR_GENERAL = -999
ERROR_ROBOT_DECTECTED_EARLIER = -1000
ERROR_EARLY_VISIT = -1001
ERROR_BLOCKED_BY_ROBOT = -1002
ERROR_HTML_PARSE = -1003
ERROR_WANTED_CONTENT_TYPE = -1004
ERROR_DUMP_RECORD = -1005
ERROR_READ_DOWNLOADED_PAGE = -1006
ERROR_DUPLICATE_CONTENT = -1007
ERROR_CONTENT_TYPE_NOT_FOUNDED = -1008

# Human-readable message for each error code.
# Fixed misspellings in the message text: "dectected" -> "detected",
# "occured" -> "occurred", "cant" -> "can't", "when read" -> "when reading".
msg = {ERROR_ROBOT_DECTECTED_EARLIER: "the robot info has been detected",
       ERROR_EARLY_VISIT: "This page has been visited recently",
       ERROR_BLOCKED_BY_ROBOT: "This page is not allowed by robots.txt",
       ERROR_HTML_PARSE: "Downloaded. But the HTML could not be parsed",
       ERROR_WANTED_CONTENT_TYPE: "NOT a wanted content type",
       ERROR_DUMP_RECORD: "Error when dump records!",
       ERROR_READ_DOWNLOADED_PAGE: "Error when reading the downloaded file",
       ERROR_DUPLICATE_CONTENT: "Error for duplicate content with other page crawled",
       ERROR_GENERAL: "Error occurred, see log file!",
       ERROR_CONTENT_TYPE_NOT_FOUNDED: "Error, can't find the content type header"
       }
