import math
import os
import sys
import urlparse
from robotexclusionrulesparser import RobotFileParserLookalike


def allowToCrawlOrNot( index, url ):
    o = urlparse.urlparse( url )
    baseURL = o.scheme + "://" + o.netloc
    robotsPolicyURL = urlparse.urljoin(baseURL,"robots.txt")
    
    flag = True
    try:
        rp = RobotFileParserLookalike()
        rp.set_url(robotsPolicyURL)
        rp.read()
        flag = rp.can_fetch("*", url)
        print index,robotsPolicyURL,url,flag
        #print index,robotsPolicyURL,url,flag
        return flag
    except Exception:
        print index,robotsPolicyURL,url,flag,"Exception"
        return flag


def loadBlackList():
    """Load the URL black list into a frequency dict.

    Returns a dict mapping each black-listed URL (one stripped line of the
    file) to the number of times it occurs in the file.
    """
    counts = {}  # renamed from 'dict', which shadowed the builtin
    # 'with' guarantees the handle is closed even on error (the original
    # leaked the file descriptor); iterating the handle streams the file
    # line-by-line instead of materializing it all via readlines().
    with open("/data/weijiang/BingDataSetDownloading/urls-related/url-black-list-20120605", "r") as blackListFile:
        for currentLine in blackListFile:
            key = currentLine.strip()  # strip once, not three times
            counts[key] = counts.get(key, 0) + 1
    return counts


print "program for splitting the urls."
print "Updated by Wei by 2012/06/06"


inputFileHandler = open("/data/weijiang/BingDataSetDownloading/programs/urls-head-1000.txt","r")
for index, line in enumerate( inputFileHandler.readlines() ):
    allowToCrawlOrNot(index, line.strip() )
    
    
