import os, sys, re, time, gzip
import simplejson as json
import urllib, urllib2, httplib
from BeautifulSoup import BeautifulSoup
from urlparse import urlparse, urlsplit
import StringIO
import mimetypes, mimetools
from ConfigParser import ConfigParser
import JobScraper
from LogHandler import Logger
from Jobs import Job
import Tools.Utils as Utils
import socket



class TimesJobsScraper(JobScraper.JobScraper):
    # Scraper for job postings on timesjobs.com; relies on the 'JobScraper'
    # base class for the HTTP/session plumbing.
    DEBUG = True # When True, diagnostic messages are printed to stdout.
    # Regex patterns specific to timesjobs.com:
    # Captures the total job count from the "N Job(s) Found ... Displaying x to y" sentence.
    jobCountSentencePattern = re.compile(r"(\d+)\s+Job\(s\)\s+Found<\/b>\s*<b>\s*Displaying\s+\d+\s+to\s+\d{2,3}\s*<\/b>", re.IGNORECASE | re.MULTILINE | re.DOTALL)
    # Matches the 'sequence' (page number) query parameter so it can be rewritten during pagination.
    sequenceNumberPattern = re.compile(r"&sequence=\d+")
    noResultsPattern = re.compile(r"No\s+Results\s+Found", re.IGNORECASE | re.MULTILINE | re.DOTALL)
    nbspPattern = re.compile(r"&nbsp;", re.IGNORECASE | re.DOTALL)
    multipleDotsPattern = re.compile(r"\.+", re.DOTALL)

    # Patterns used in parsing job details pages:
    jobTitleDivPattern = re.compile(r"jobTitle\s+fl") # For identifying the div tag containing job title.
    keysSkillsHeaderPattern = re.compile(r"KEY\s+SKILLS", re.IGNORECASE | re.MULTILINE | re.DOTALL)
    roleHeaderPattern = re.compile(r"SPECIALIZATION", re.IGNORECASE | re.MULTILINE | re.DOTALL)
    jobFunctionHeaderPattern = re.compile("JOB\s+FUNCTION", re.IGNORECASE | re.MULTILINE | re.DOTALL)
    # Captures day, 3-letter month abbreviation and year from "Job Posted on: DD Mon, YYYY".
    postedOnValuePattern = re.compile(r"Job\s+Posted\s+on:\s+(\d{1,2})\s+(\w{3})\,\s+(\d{4})\s+", re.IGNORECASE | re.MULTILINE | re.DOTALL)
    jobDescriptionHeaderPattern = re.compile(r"Job\s+Description", re.IGNORECASE | re.MULTILINE | re.DOTALL)
    desiredCandidateProfileHeaderPattern = re.compile(r"Desired\s+Candidate\s+Profile:", re.IGNORECASE | re.MULTILINE | re.DOTALL)
    postedByPattern = re.compile(r"Job\s+Posted\s+by", re.IGNORECASE | re.MULTILINE | re.DOTALL)
    
    #telephoneNumberPattern = re.compile(r"(Call|Tel|No|Num|#\w*)[^a-z0-9]{1,}(\d{0,3}\s?\-?\s?\d{0,2}\s?\-?\s?\d{8,10})[^a-z0-9]", re.IGNORECASE | re.MULTILINE | re.DOTALL). 
    # Note: The number of non-word characters in between the patterns have been restricted to 9 since regex search method gets stuck at some pages if the range is open ended ("\W{1,}"). 
    # In the three patterns below, group 1 is a candidate contact-person name and
    # group 2 is the telephone number / mobile number / email id respectively.
    telephoneNumberPattern = re.compile(r"\W([\w\s]+)[^0-9]{1,9}(\d{0,3}\s?\-?\s?\d{0,2}\s?\-?\s?\d{5,8})\W", re.IGNORECASE | re.MULTILINE | re.DOTALL)
    mobileNumberPattern = re.compile(r"\W([\w\s]+)[^0-9]{1,9}(\d{0,3}\s?\-?\s?\d{8,10})\W", re.IGNORECASE | re.MULTILINE | re.DOTALL)
    emailIdPattern = re.compile(r"\W([\w\s]+)[^0-9a-zA-Z]{1,9}([\w\.\-_]+\@[\w\.]+)\W", re.IGNORECASE | re.MULTILINE | re.DOTALL)

    def __init__(self, cfgFile):
	super(TimesJobsScraper, self).__init__(cfgFile, "http://www.timesjobs.com/", "http://www.timesjobs.com/candidate/login.html")
	self.httpHeaders['Referer'] = self.loginPageUrl
	# The attribute 'jobsByIndustry' will be a dict of industry names to jobs under that industry. The first level keys would
	# be the industry names, the second level keys would be the URL to the job details page for each job, and the value would
	# be a 'Job' object as defined in the module 'Jobs.py'. (The attributes of a 'Job' object are 'JobTitle', 'CompanyName', 
	# 'JobDescription', 'Locations', 'Experience', 'KeySkills', 'Function', 'Role', 'PostedOn', 'ContactDetails' and 'JobUrl')
	self.jobsByIndustry = {} # The end result will be stored as a 2-level dictionary with industry names as first level keys and a
				# dictionary containing jobs info for that industry as values. The keys for this second level dictionary
				# are the URLs to the job specification pages and the values are the 'Job' objects created from the info
				# from those URLs. This info will be generated in-memory and stored in 'timesjobsObject.jobsByIndustry'.
	self.industryWiseCounts = {}
	self.industryLinks = {} # This dictionary will hold the URLs to each of the industries listed in 'timesjobs.com'. The industry
				# names will be the keys and the URLs to the industry specific pages will be the values.
	self.requestTimeout = 30
	socket.setdefaulttimeout(self.requestTimeout)


    # Method to login into a account (as job seeker) on timesjobs.com
    # The account credentials are passed as arguments to the method.
    # Both arguments ('username' and 'password') are mandatory. 
    # The return value is the HTML content of the page that appears
    # immediately after logging in. The same content is also stored
    # and accessible as '<object>.currentPageContent'. 
    # Note: the value of the 'username' parameter may be an email Id.
    def doLogin(self, username, password):
	# Log into timesjobs.com as a job seeker using the given credentials
	# ('username' may be an email id). Posts the login form, follows the
	# expected 'Location' redirect, and returns the HTML of the page shown
	# right after login (also cached in 'self.currentPageContent').
	# Exits the process if the expected redirect header is missing.
	formData = self._getLoginFormElementsDict()
	formData["j_username"] = username
	formData["j_password"] = password
	self.postData = urllib.urlencode(formData)
	# Build the POST headers from the session headers, minus 'Keep-Alive'.
	httpHeaders = {}
	for hdrkey in self.httpHeaders.keys():
	    if hdrkey == 'Keep-Alive':
		continue
	    httpHeaders[hdrkey] = self.httpHeaders[hdrkey]
	# Headers specific to the login form POST.
	httpHeaders['Cache-Control'] = "max-age=0"
	httpHeaders['Content-Type'] = "application/x-www-form-urlencoded"
	httpHeaders['Content-Length'] = len(self.postData)
	httpHeaders['Origin'] = "http://www.timesjobs.com"
	httpHeaders['Accept-Encoding'] = "gzip,deflate,sdch"
	httpHeaders['Accept-Charset'] = "ISO-8859-1,utf-8;q=0.7,*;q=0.3"
	httpHeaders['Referer'] = self.loginPageUrl
	self.pageRequest = urllib2.Request(self.requestUrl, self.postData, httpHeaders)
	try:
            self.pageResponse = self.no_redirect_opener.open(self.pageRequest)
	    self.sessionCookies = self.updateCookies(self.pageResponse)
	    if self.__class__.DEBUG:
	        print "Cookies Received 1: " + self.sessionCookies.__str__()
	    httpHeaders["Cookie"] = self.sessionCookies
        except:
            # NOTE(review): bare except - if the POST itself fails,
            # 'self.pageResponse' may be stale/unset and the '.info()' call
            # below will raise a confusing secondary error; consider re-raising.
            print sys.argv[0] + ": " + sys.exc_info()[1].__str__()
	# In this response we also expect a 'Location' header
	responseHeaders = self.pageResponse.info()
	if responseHeaders.getheader('Location'):
	    self.requestUrl = responseHeaders.getheader('Location')
	else:
	    print sys.argv[0] + ": " + "Could not get the expected redirection URL. Exiting... "
	    sys.exit()
	# Delete the keys 'Origin', 'Content-Length' and 'Content-Type' from httpHeaders
	# (they only apply to the form POST, not to the follow-up GET).
	del httpHeaders['Origin']
	del httpHeaders['Content-Length']
	del httpHeaders['Content-Type']
	# Synchronize 'self.httpHeaders' with 'httpHeaders': keep/update shared
	# keys, drop session keys absent from 'httpHeaders'.
	for hkey in self.httpHeaders.keys():
	    if httpHeaders.has_key(hkey):
	    	self.httpHeaders[hkey] = httpHeaders[hkey] # Set the value for the key in 'self.httpHeaders'...
		del httpHeaders[hkey] # ...and now delete this key from 'httpHeaders'
	    else:
		del self.httpHeaders[hkey] # Delete all keys that are not present in 'httpHeaders'
	# Now there might be some keys in 'httpHeaders' that have not been transferred to 'self.httpHeaders'. Set them for 'self.httpHeaders'
	for hkey in httpHeaders.keys():
	    self.httpHeaders[hkey] = httpHeaders[hkey]
	    del httpHeaders[hkey]
	self.pageRequest = urllib2.Request(self.requestUrl, None, self.httpHeaders)
	# Send the redirect request...
	try:
            self.pageResponse = self.no_redirect_opener.open(self.pageRequest)
	    self.sessionCookies = self.updateCookies(self.pageResponse)
	    if self.__class__.DEBUG:
	        print "Cookies Received 2: " + self.sessionCookies.__str__()
	    httpHeaders["Cookie"] = self.sessionCookies
        except:
            print sys.argv[0] + ": " + sys.exc_info()[1].__str__()
	self.httpHeaders['Cookie'] = self.sessionCookies
	# Body may be gzip-compressed (we advertise gzip in Accept-Encoding).
	self.currentPageContent = self.__class__._decodeGzippedContent(self.getPageContent())
	return(self.currentPageContent)


    # Method to retrieve the form elements and their values
    # from the login form. This is supposed to be called from
    # 'doLogin' method. Return value is a dictionary with the
    # element names as keys and their values (if available)
    # as values. This method also sets the values of self.requestUrl
    # and self.requestMethod to the values of the 'action' and 
    # 'method' attributes of the form respectively.
    def _getLoginFormElementsDict(self):
	soup = None
	try:
	    soup = BeautifulSoup(self.currentPageContent)
	except:
	    self.currentPageContent = self.__class__.sanitizePageHTML(self.currentPageContent)
	    soup = BeautifulSoup(self.currentPageContent)
	loginForm = soup.find("form", {'name' : 'loginForm' })
	currentUrl = self.requestUrl
	currentUrlParts = currentUrl.split("/")
	currentUrlPath = "/".join(currentUrlParts[:-1])
	if not loginForm.has_key("action"):
	    self.requestUrl = ""
    	else:
	    self.requestUrl = loginForm["action"]
	if not self.__class__._isAbsoluteUrl(self.requestUrl):
	    self.requestUrl = currentUrlPath + os.path.sep + self.requestUrl
	self.requestMethod = loginForm["method"]
	allInputs = loginForm.findAll("input")
	formElementsDict = {}
	for inputTag in allInputs:
	    if inputTag.has_key("name"):
		formElementsDict[inputTag["name"]] = ""
	    if inputTag.has_key("value"):
		formElementsDict[inputTag["name"]] = inputTag["value"]
	formElementsDict['x'] = 39 # TODO: Find out how this gets generated.
	formElementsDict['y'] = 19 # TODO: Find out how this gets generated.
	return(formElementsDict)


    # Customized cookie extractor for timesjobs
    def updateCookies(self, httpResponse):
	cookies = ""
	cookiesDict = {}
	# Handle new cookies;
        httpResponseHeaders = httpResponse.info()
        responseCookies = httpResponseHeaders.getheaders("Set-Cookie")
        if responseCookies.__len__() > 1:
            for cookie in responseCookies:
		cookie = re.sub(re.compile(r"\""), "", cookie)
		if not re.compile(r"=").search(cookie):
		    continue
                cookieParts = cookie.split(";")[0].split("=")
		cookieName, cookieValue = "", ""
		if cookieParts.__len__() == 1:
		    cookieName = cookieParts[0]
		elif cookieParts.__len__() > 1:
		    cookieName = cookieParts[0]
		    cookieValue = cookieParts[1]
		cookieName = cookieName.strip()
		cookieValue = cookieValue.strip()
		if cookieValue == "" or cookieValue == "\"\"":
		    continue
		cookiesDict[cookieName] = cookieValue
	# then handle existing cookies ...
	sessCookiesList = self.httpHeaders['Cookie'].split(";")
	for sessCookie in sessCookiesList:
	    sessCookie = sessCookie.strip()
	    if sessCookie == "":
		continue
	    cookieName, cookieValue = "", ""
	    if re.compile(r"=").search(sessCookie):
	    	cookieName, cookieValue = sessCookie.split("=")
	    else:
		continue
	    cookieName = cookieName.strip()
	    cookieValue = cookieValue.strip()
	    if cookieName == "Path" or cookieName == "Comment" or cookieName == "Max-Age" or cookieName == "Version":
		continue
	    if not cookiesDict.has_key(cookieName):
		cookiesDict[cookieName] = cookieValue
	# ... and merge new cookies with existing ones.
	for cookieName in cookiesDict.keys():
	    cookies += cookieName + "=" + cookiesDict[cookieName] + ";"
	# Finally, before returning, prepend 'killmenothing' cookie.
	cookies = "killmenothing; " + cookies
        return(cookies)


    # This method accepts a URL as a parameter and attempts to navigate to the
    # page pointed to by that URL using HTTP GET. It supports redirections using
    # 'Location' headers (calls itself recursively if 'Location' header is present. 
    def navigatePage(self, pageUrl):
	self.requestUrl = pageUrl
	self.pageRequest = urllib2.Request(self.requestUrl, None, self.httpHeaders)
	try:
	    self.pageResponse = self.no_redirect_opener.open(self.pageRequest)
	    self.sessionCookies = self.updateCookies(self.pageResponse)
	    self.httpHeaders["Cookie"] = self.sessionCookies
	    self.httpHeaders["Referer"] = self.requestUrl
	except:
	    print __file__.__str__() + ": " + sys.exc_info()[1].__str__()
	self.currentPageContent = self.__class__._decodeGzippedContent(self.getPageContent())
	responseHeaders = self.pageResponse.info()
	if responseHeaders.getheader('Location'):
	    self.requestUrl = responseHeaders.getheader('Location')
	else:
	    return (self.currentPageContent)
	if not self.__class__._isAbsoluteUrl(self.requestUrl):
	    self.requestUrl = currentUrlPath + os.path.sep + self.requestUrl
	self.currentPageContent = self.navigatePage(self.requestUrl)
	return(self.currentPageContent)


    # This method navigates to the page from which a job search may be launched.
    def getSearchInterfacePage(self, searchPageUrl="http://www.timesjobs.com/candidate/job-search.html"):
	refererCookiePattern = re.compile(r"referer=\"\"")
	self.httpHeaders['Cookie'] = re.sub(refererCookiePattern, "referer=\"" + self.httpHeaders['Referer'] + "\"", self.httpHeaders['Cookie'])
	pageContent = self.navigatePage(searchPageUrl)
	return(pageContent)


    # This method is similar to 'navigatePage()', but it doesn't set the caller's object
    # attributes. So, after calling this method you will still have your object's 
    # 'requestUrl', 'pageRequest', 'pageResponse', 'httpHeaders', 'sessionCookies' and
    # 'currentPageContent' attributes unchanged. 'navigatePage()' changes them all, but
    # fetchPage doesn't. It simply fetches the page pointed to by the URL ('pageUrl')
    # passed to it using the state of the caller object, and returns the HTML (text)
    # content of the page. However, it does handle redirections like 'navigatePage()'.
    # On failure in any of the HTTP requests, it returns 'None'.
    def fetchPage(self, pageUrl):
	httpHeaders = {}
	# Copy all http headers with the exception of 'Keep-Alive'
	for hdrKey in self.httpHeaders.keys():
	    if hdrKey == "Keep-Alive":
		continue
	    httpHeaders[hdrKey] = self.httpHeaders[hdrKey]
	httpRequest = urllib2.Request(pageUrl, None, self.httpHeaders)
	pageContent = None
	nextUrl = None
	pageResponse = None
	sys.stdout.flush()
	try:
	    pageResponse = self.no_redirect_opener.open(httpRequest, None, self.requestTimeout)
	except:
	    print __file__.__str__() + ": " + sys.exc_info()[1].__str__()
	    return (None)
	rawPageContent = ""
	try:
	    rawPageContent = pageResponse.read()
	except:
	    print "Failed to read content from '%s': %s"%(pageUrl, sys.exc_info()[1].__str__())
	pageContent = self.__class__._decodeGzippedContent(rawPageContent)
	responseHeaders = pageResponse.info()
	if responseHeaders.getheader('Location'):
	    nextUrl = responseHeaders.getheader('Location')
	else:
	    if pageContent is None:
		print "Could not fetch page content from '%s'"%pageUrl
	    return (pageContent)
	if not self.__class__._isAbsoluteUrl(nextUrl):
	    currentUrlPath = Utils.getUrlDirPathFromUrl(self.requestUrl)
	    nextUrl = currentUrlPath + os.path.sep + nextUrl
	pageContent = self.fetchPage(nextUrl)
	return(pageContent)



    # This method creates a dictionary containing the names of all industries retrieved from the
    # page listing the industries. The names of the industries are the keys of the dictionary and
    # the URL to the starting page of job listing for each of them are the values. 
    # The method returns this dictionary ('self.industryLinks'). 
    def _extractIndustriesLinks(self):
	self.industryLinks = {}
	pageContent = self.currentPageContent
	soup = None
	try:
	    soup = BeautifulSoup(pageContent)
	except:
	    pageContent = pageContent.decode("ascii", "ignore")
	    pageContent = self._handleCustomShitInHTML(pageContent)
	    soup = BeautifulSoup(pageContent)
	# Find the '<select>' dropdown that lists all industries in the page
	industriesSelect = soup.find("select", {'id' : 'cboIndustry'})
	allIndustryOptions = []
	if industriesSelect is not None:
	    allIndustryOptions = industriesSelect.findAll("option")
	industryPageUrl = "job-search.html?from=submit&searchType=Industry&cboIndustry="
	currentUrlPath = Utils.getUrlDirPathFromUrl(self.requestUrl)
	for indOption in allIndustryOptions:
	    indName = indOption.renderContents().strip()
	    indOptValue = indOption['value']
	    self.industryLinks[indName] = currentUrlPath + "/" + industryPageUrl + indOptValue
	return(self.industryLinks)


    # Wrapper over _extractIndustriesLinks so that we can use it
    # in 'main()'. ('_extractIndustriesLinks' could also be used
    # directly from 'main()', but since it was written to be used
    # as a private method, we created this wrapper to make use of
    # it externally.
    def getJobListingUrlsByIndustry(self):
	self._extractIndustriesLinks()
	return(self.industryLinks)


    # This method creates a dictionary containing the count of jobs posted for each of the listed
    # industries. For this dictionary, the names of the industries are the keys and the counts of
    # jobs under them are the values ('self.industryWiseCounts' is set). Please note that for this 
    # method to successfully populate 'industryWiseCounts' attribute of 'TimesJobs' object, it should
    # be called only after a call to the 'getJobListingUrlsByIndustry' method (or, its underlying
    # '_extractIndustriesLinks' method). On failure, it returns 'None'.
    def getIndustryWiseJobCounts(self):
	if type(self.industryLinks) == dict and self.industryLinks.keys().__len__() > 0:
	    for indName in self.industryLinks.keys():
		industryPageContent = self._getFirstJobListingPage(indName)
		if industryPageContent is None: # Possibly could not fetch 'industryPageContent', may be due to poor connectivity.
		    continue
		jobCountSearch = self.__class__.jobCountSentencePattern.search(industryPageContent)
		jobCount = None
		if jobCountSearch is not None:
		    jobCount = jobCountSearch.groups()[0]
		self.industryWiseCounts[indName] = jobCount
	else:
	    self.industryWiseCounts = {}
	return(self.industryWiseCounts)


    # Computes the URL of the next jobs listing page. 
    # This should be called only in the context of 
    # retrieving job details pages for a specific industry.
    def getNextPageUrl(self, startPageUrl, pageNum):
	nextPageUrl = startPageUrl
	if not re.compile(r"luceneResultSize=\d+").search(nextPageUrl):
	    nextPageUrl += "&luceneResultSize=50"
	if not re.compile(r"pDate=Y").search(nextPageUrl):
	    nextPageUrl += "&pDate=Y"
	if not self.__class__.sequenceNumberPattern.search(nextPageUrl):
	    nextPageUrl += "&sequence=" + pageNum.__str__()
	else:
	    nextPageUrl = re.sub(self.__class__.sequenceNumberPattern, "&sequence=" + pageNum.__str__(), nextPageUrl)
	if not re.compile(r"startPage=\d+").search(nextPageUrl):
	    nextPageUrl += "&startPage=1"
	return (nextPageUrl)


    # This method fetches the first job listing page
    # for a given industry. The name of the industry
    # is passed as an argument to this method. This 
    # is used in 'getIndustryWiseJobCounts' method 
    # and 'getJobsDict' method. This method returns
    # the content of the page it fetches. On failure
    # it returns 'None'.
    def _getFirstJobListingPage(self, indName):
	if indName is None or indName.strip() == "" or not self.industryLinks.has_key(indName):
	    return (None)
	industryUrl = self.industryLinks[indName]
	industryPageContent = self.fetchPage(industryUrl)
	return(industryPageContent)


    # Fetch the contents of the job details page.
    def _getJobDetailsPage(self, detailsUrl):
	jobDetailsPageContent = self.fetchPage(detailsUrl)
	return(jobDetailsPageContent)


    # Parse a page containing jobs listing and extract all the URLs to the
    # job details page for each of the jobs listed in that page.
    # Returns a list of URLs to all the jobs listed on the page.
    def _getJobLinksFromPage(self, pageContent, logObj=None):
	jobLinksList = []
	if pageContent is None:
	    return (jobLinksList)
	soup = None
	try:
	    soup = BeautifulSoup(pageContent)
	except:
	    if logObj is not None:
		logObj.write("Page content was insane... Sanitizing it\n")
	    pageContent = self._handleCustomShitInHTML(pageContent)
	    pageContent = self.__class__.sanitizePageHTML(pageContent)
	    pageContent = pageContent.decode("ascii", "ignore")
	    soup = BeautifulSoup(pageContent)
	allJobHeaderDivs = soup.findAll("div", {'class' : 'job_hdtxt'})
	for jobHdrDiv in allJobHeaderDivs:
	    nextAnchorTag = jobHdrDiv.findNext("a")
	    if not nextAnchorTag or not nextAnchorTag.has_key("href"):
		continue
	    jobDetailsPageLink = nextAnchorTag['href']
	    if not self.__class__._isAbsoluteUrl(jobDetailsPageLink): # Is the URL an absolute URL? If not, make it absolute.
	    	jobDetailsPageLink = self.baseUrl + os.path.sep + jobDetailsPageLink
	    jobLinksList.append(jobDetailsPageLink)
	return(jobLinksList)


    # Method to find out all instances of a specified regex pattern ('pattern') in a given string ('text').
    # Returns a list of all match objects. Argument 'pattern' should be a compiled regex pattern.
    def _findall(cls, pattern, text):
	allMatches = []
	matchObj = pattern.search(text)
	if not matchObj:
	    return allMatches
	else:
	    matchEnd = matchObj.end()
	    allMatches.append(matchObj)
	    text = text[matchEnd+1:]
	    while(text.__len__() > 0 and matchObj is not None):
		matchObj = pattern.search(text)
		if not matchObj:
		    break
		else:
	    	    matchEnd = matchObj.end()
	    	    allMatches.append(matchObj)
	    	    text = text[matchEnd+1:]
	return(allMatches)
    _findall = classmethod(_findall)



    # This method parses a job details page and returns
    # a dictionary. The keys are the various attributes
    # supported by the 'Job' class. Additionally, this 
    # method also populates the '<TimesJobs object>.industryWiseCounts'
    # attribute if it is not populated at the time of call.
    # Note: The 'JobUrl' attribute is not populated for any job
    # in this method. We just put the 'JobUrl' key in the 
    # dictionary before returning it. The URL of the job details
    # page should be added as the value for the key 'JobUrl'
    # after the call to this method in 'getJobsDict' method.
    def parseJobDetailsPage(self, pageContent):
	jobAttribs = {}
	if not pageContent or pageContent == "":
	    if self.DEBUG:
	    	print "Details page content is empty..."
	    return jobAttribs
	soup = None
	try:
	    soup = BeautifulSoup(pageContent)
	except:
	    pageContent = self._handleCustomShitInHTML(pageContent)
	    pageContent = pageContent.decode("ascii", "ignore")
	    pageContent = re.sub(self.__class__.nbspPattern, " ", pageContent) # Expect a lot of nbsp in details pages.
	    try:
	    	soup = BeautifulSoup(pageContent)
	    except:
		soup = None # If we still couldn't get to parse the page and create 'soup', then leave it.
	    if self.DEBUG:
	    	print "Handled malformed HTML in details page..."
	if not soup: # TODO: Handle parsing if 'soup' creation fails (may be due to bad HTML) with regular expressions.
	    if self.DEBUG:
	    	print "Failed at 'parseJobDetailsPage' method: Could not create soup object."
	    return (jobAttribs)
	jobTitleDivTag = soup.find("div", {'class' : self.__class__.jobTitleDivPattern})
	if not jobTitleDivTag: # TODO: If we can't find the div tag we are looking for, then handle parsing using regular expressions.
	    if self.DEBUG:
	    	print "Failed at 'parseJobDetailsPage' method: Could not find the div tag for job title."
	    return (jobAttribs)
	h1Tag = soup.find("h1") # The details page contains only 1 'h1' tag - This displays the job title.
	if h1Tag is not None:
	    jobAttribs['JobTitle'] = h1Tag.renderContents().strip()
	else:
	    jobAttribs['JobTitle'] = ""
	companyNameParagraphTag = soup.find("p", {'class' : 'jd_comp'})
	if companyNameParagraphTag is not None:
	    jobAttribs['CompanyName'] = companyNameParagraphTag.renderContents().strip()
	else:
	    jobAttribs['CompanyName'] = ""
	font13ParagraphsList = soup.findAll("p", {'class' : 'font13'})
	if font13ParagraphsList.__len__() > 0 and font13ParagraphsList[0] is not None:
	    jobAttribs['Locations'] = font13ParagraphsList[0].renderContents().strip()
	else:
	    jobAttribs['Locations'] = ""
	if font13ParagraphsList.__len__() > 1 and font13ParagraphsList[1] is not None:
	    jobAttribs['Experience'] = font13ParagraphsList[0].renderContents().strip()
	else:
	    jobAttribs['Experience'] = ""
	attribHeadersSpansList = soup.findAll("span", {'class' : 'li_HD'})
	for spanTag in attribHeadersSpansList:
	    attribHeaderSpanContent = spanTag.renderContents().strip()
	    # If '<span>' contains the pattern "KEY\s+SKILLS", then next '<span>' holds the value(s) for key skills.
	    if self.__class__.keysSkillsHeaderPattern.search(attribHeaderSpanContent):
		keySkillsValuesSpan = spanTag.findNext("span", {'class' : 'mrgn_t8 disBlk'})
		jobAttribs['KeySkills'] = ""
		if keySkillsValuesSpan is not None:
		    allAnchors = keySkillsValuesSpan.findAll("a")
		    for atag in allAnchors:
			jobAttribs['KeySkills'] += atag.renderContents().strip()
	    elif self.__class__.roleHeaderPattern.search(attribHeaderSpanContent):
		roleValuesSpan = spanTag.findNext("span", {'class' : 'mrgn_t8 disBlk'})
		jobAttribs['Role'] = ""
		if roleValuesSpan is not None:
		    jobAttribs['Role'] = roleValuesSpan.renderContents().strip()
	    elif self.__class__.jobFunctionHeaderPattern.search(attribHeaderSpanContent):
		jobFunctionValuesSpan = spanTag.findNext("span", {'class' : 'mrgn_t8 disBlk'})
		jobAttribs['Function'] = ""
		if jobFunctionValuesSpan is not None:
		    jobAttribs['Function'] = jobFunctionValuesSpan.renderContents().strip()
	    else:
		continue
	# Get 'Posted On' value
	postedOnSpanTag = soup.find("span", {'class' : 'fnt11'})
	jobAttribs['PostedOn'] = ""
	if postedOnSpanTag is not None:
	    postedOnSpanContents = postedOnSpanTag.renderContents().strip()
	    postedOnSpanSearch = self.__class__.postedOnValuePattern.search(postedOnSpanContents)
	    if postedOnSpanSearch is not None:
		postedOnSpanValues = postedOnSpanSearch.groups()
		jobAttribs['PostedOn'] = postedOnSpanValues[0] + " " + postedOnSpanValues[1] + ", " + postedOnSpanValues[2]
	# Get 'Job Description' value
	allParagraphTags = soup.findAll("p", {'class' : 'jd_HD'})
	jobAttribs['JobDescription'] = ""
	for pTag in allParagraphTags:
	    pTagContents = pTag.renderContents().strip()
	    if self.__class__.jobDescriptionHeaderPattern.search(pTagContents):
		nextTdTag = pTag.findNext("td")
		jobAttribs['JobDescription'] = nextTdTag.renderContents().strip()
		jobAttribs['JobDescription'] = re.sub(self.__class__.htmlTagPattern, "", jobAttribs['JobDescription'])
	    elif self.__class__.desiredCandidateProfileHeaderPattern.search(pTagContents):
		nextTdTag = pTag.findNext("td")
		jobAttribs['JobDescription'] += "  Desired Candidate Profile: "
		jobAttribs['JobDescription'] += nextTdTag.renderContents().strip()
		jobAttribs['JobDescription'] = re.sub(self.__class__.htmlTagPattern, "", jobAttribs['JobDescription'])
	jobAttribs['JobDescription'] = re.sub(self.__class__.nbspPattern, " ", jobAttribs['JobDescription'])
	# Remove unicode characters from job description
	jobAttribs['JobDescription'] = jobAttribs['JobDescription'].decode("ascii", "ignore")
	# Get contact information from 'Job Description', 'Desired Candidate Profile', and 'Job Posted By' sections.
	jobAttribs['ContactDetails'] = ""
	contactInfoDict = {} # A dictionary that would contain telephone/mobile numbers or email Ids as keys and the contact persons names as values.
	# Regexes to eliminate strings like 'Call', 'Id', 'number', 'phone', 'mobile', 'or' etc.
	unimportantWordPatternsList = [re.compile(r"call", re.IGNORECASE | re.DOTALL), re.compile(r"n[oumber]+", re.IGNORECASE | re.DOTALL), re.compile(r"phone", re.IGNORECASE | re.DOTALL), re.compile(r"mobile", re.IGNORECASE | re.DOTALL), re.compile(r"email", re.IGNORECASE | re.DOTALL), re.compile(r"id", re.IGNORECASE | re.DOTALL), re.compile(r"contact", re.IGNORECASE | re.DOTALL)]
	# Handle 'Job Description' and 'Desired Candidate Profile' sections - looking for anything that resembles a telephone/mobile number or email Id.
	if self.DEBUG:
	    print "Trying to find telephone numbers in job description text %s characters long...."%jobAttribs['JobDescription'].__len__().__str__()
	telephoneNumberMatches = self.__class__._findall(self.__class__.telephoneNumberPattern, jobAttribs['JobDescription'])
	if self.DEBUG:
	    print "Found %s telephone number matches.\n"%telephoneNumberMatches.__len__().__str__()
	if telephoneNumberMatches is not None and telephoneNumberMatches.__len__() > 0:
	    for telNumMatch in telephoneNumberMatches:
	    	firstTelNumGroup, telephoneNumber = telNumMatch.groups() # 'firstGroup' may or may not contain the name of the contact person
	    	contactInfoDict[telephoneNumber] = firstTelNumGroup
	mobileNumberMatches = self.__class__._findall(self.__class__.mobileNumberPattern, jobAttribs['JobDescription'])
	if mobileNumberMatches is not None and mobileNumberMatches.__len__() > 0:
	    for mobNumMatch in mobileNumberMatches:
	    	firstMobileNumGroup, mobileNumber = mobNumMatch.groups()
	    	contactInfoDict[mobileNumber] = firstMobileNumGroup
	emailIdMatches = self.__class__._findall(self.__class__.emailIdPattern, jobAttribs['JobDescription'])
	if emailIdMatches is not None and emailIdMatches.__len__() > 0:
	    for emailIdMatch in emailIdMatches:
	    	firstEmailIdGroup, emailId = emailIdMatch.groups()
	    	contactInfoDict[emailId] = firstEmailIdGroup
	for numberOrId in contactInfoDict.keys():
	    contactName = contactInfoDict[numberOrId]
	    for wordPattern in unimportantWordPatternsList:
		if wordPattern.search(contactName):
		    contactInfoDict[numberOrId] = ""
		else:
		    contactInfoDict[numberOrId] = contactName.lower()
	    if contactName != "":
	    	jobAttribs['ContactDetails'] += contactName + ": "
	    jobAttribs['ContactDetails'] += numberOrId + "; "
	# Handle 'Job Posted by' section - to look for the same type of information we collected from 'Job Description' section.
	jobPostedByContents = ""
	postedByPTag = soup.find("p", {'class' : 'jd_HD company_job'})
	# If 'postedByPTag' is not 'None', find the contents of the 'td' tag immediately succeeding it.
	if postedByPTag is not None and self.__class__.postedByPattern.search(postedByPTag.renderContents()):
	    nextTDTag = postedByPTag.findNext("td")
	    if nextTDTag is not None:
		tdTagContents = nextTDTag.renderContents()
		tdTagContents = re.sub(self.__class__.htmlTagPattern, " ", tdTagContents)
		tdTagContents = re.sub(self.__class__.nbspPattern, " ", tdTagContents)
		jobPostedByContents = tdTagContents
	    nextUlTag = postedByPTag.findNext("ul", {'class' : 'jdUL'})
	    if nextUlTag is not None:
		ulTagContents = nextUlTag.renderContents()
		ulTagContents = re.sub(self.__class__.htmlTagPattern, " ", ulTagContents)
		ulTagContents = re.sub(self.__class__.nbspPattern, " ", ulTagContents)
		jobPostedByContents += ulTagContents
	    jobAttribs['ContactDetails'] += jobPostedByContents
	return(jobAttribs)
	


    # We will try to eliminate all <script > ... </script> tags
    # and their contents. Javascript code has escaped sequences
    # in a lot of places and this causes BeautifulSoup to throw
    # the well known "malformed start tag..." error.
    def _handleCustomShitInHTML(self, htmlContent):
	if htmlContent is None:
	    return None
	scriptSplitParts = htmlContent.split("<script")
	htmlContent = ""
	for htmlPart in scriptSplitParts:
	    if re.compile(r"</script>", re.IGNORECASE | re.DOTALL).search(htmlPart):
	    	scriptAndHtmlParts = htmlPart.split(r"</script>")
	    	htmlContent += scriptAndHtmlParts[1]
	    else:
		htmlContent += htmlPart
	# Some pages (job details pages) have tags with event handler calls without quotes... Remove them
	onMouseHandlerPattern = re.compile(r"onmouse\w+[^>]+>", re.IGNORECASE | re.DOTALL)
	htmlContent = re.sub(onMouseHandlerPattern, ">", htmlContent)
	return (htmlContent)


    # This method extracts all jobs info for a specified
    # industry (passed as an argument). It handles extraction
    # of jobs info from multiple pages of listing, gets the
    # jobs details by extracting info from the job details 
    # page and creates a 'Job' object for each job processed.
    # It returns the info as a dictionary, whose keys are the
    # URLs to the job details pages, and the values are the
    # 'Job' objects created using the info for each job. It 
    # gets the URL to the first page of the job listing for 
    # the specified industry by using the 'industryLinks'
    # object attribute populated by the call to 'getJobListingUrlsByIndustry'.
    def getJobsDict(self, industry, logObj=None):
	if logObj is not None:
	    logObj.write("Processing industry '%s'...\n"%industry)
	print "Processing industry '%s'..."%industry
	jobsDict = {}
	industryUrl = self.industryLinks[industry]
	firstListingPage = self._getFirstJobListingPage(industry)
	if self.industryWiseCounts.keys().__len__() == 0 or not self.industryWiseCounts.has_key(industry) or self.industryWiseCounts[industry] is None and firstListingPage is not None:
	    jobCountSearch = self.__class__.jobCountSentencePattern.search(firstListingPage)
	    jobCount = None
	    if jobCountSearch is not None:
		jobCount = jobCountSearch.groups()[0]
	    self.industryWiseCounts[industry] = jobCount
	nextPageExistsFlag = True
	jobDetailPageUrlsList = []
	listingPageContent = firstListingPage
	pageNum = 1
	while nextPageExistsFlag:
	    urlsList = self._getJobLinksFromPage(listingPageContent, logObj)
	    if self.DEBUG:
	    	print "Fetched %s urls from page %s"%(urlsList.__len__().__str__(), pageNum.__str__())
	    if logObj is not None:
	    	logObj.write("Fetched %s urls from page %s"%(urlsList.__len__().__str__(), pageNum.__str__()))
	    jobDetailPageUrlsList.extend(urlsList)
	    pageNum = pageNum + 1
	    nextListingPageUrl = self.getNextPageUrl(industryUrl, pageNum)
	    if self.DEBUG:
		print "Processing page #%s from '%s'...\n"%(pageNum.__str__(), nextListingPageUrl)
	    listingPageContent = self.fetchPage(nextListingPageUrl)
	    soup = None
	    try:
		soup = BeautifulSoup(listingPageContent)
	    except:
		if logObj is not None:
		    logObj.write("Listing page from '%s' is badly formed...\n"%nextListingPageUrl)
		if self.DEBUG:
		    print "Problem parsing content from '%s' - %s"%(nextListingPageUrl, sys.exc_info()[1].__str__() + "\n")
		listingPageContent2 = self._handleCustomShitInHTML(listingPageContent)
		listingPageContent2 = self.__class__.sanitizePageHTML(listingPageContent2)
		if listingPageContent2 is not None and type(listingPageContent2) == str:
		    listingPageContent2 = listingPageContent2.decode("ascii", "ignore")
		else:
		    listingPageContent2 = ""
		listingPageContent = listingPageContent2
		soup = BeautifulSoup(listingPageContent2)
	    if not soup:
		if logObj is not None:
		    logObj.write("Could not create 'BeautifulSoup' object with page from '%s'.\nSkipping the page...\n"%nextListingPageUrl)
		continue # Should consider what to do more judicially later on. Might want to 'break' out as well.
	    divHdTag = soup.find("div", {'class' : 'hd'})
	    if not divHdTag:
		if logObj is not None:
		    logObj.write("Could not find tag <div class='hd'> in page from '%s'.\n Page contains jobs list.\n"%nextListingPageUrl)
		continue
	    divHdTagContents =  divHdTag.renderContents()
	    if self.__class__.noResultsPattern.search(divHdTagContents): # We have reached the end of the listings for the given industry.
	    	nextPageExistsFlag = False
		if logObj is not None:
		    logObj.write("Collected %s job URLs for '%s' industry\n"%(jobDetailPageUrlsList.__len__().__str__(), industry))
		print "Collected %s job URLs for '%s' industry"%(jobDetailPageUrlsList.__len__().__str__(), industry)	
	# At this point we have links to all the jobs in the given industry. So now we will start iterating over these links to
	# fetch the job details page from each of them, parse those job detail pages to extract information, create a 'Job'
	# object using each set of information, and finally, we create a dictionary with the URL of the job detail page as key
	# and the 'Job' object as the corresponding value. At the end, we return this dictionary.
	# TODO: Should be handled using multiple threads of execution by a parser process.
	detailsCounter = 1
	for jobDetailsPageUrl in jobDetailPageUrlsList:
	    jobDetailsPageContent = self._getJobDetailsPage(jobDetailsPageUrl)
	    jobAttribsDict = self.parseJobDetailsPage(jobDetailsPageContent)
	    jobAttribsDict['JobUrl'] = jobDetailsPageUrl # We could not extract 'JobUrl' from job details page. So we add it here.
	    print "Processing %s of total %s pages"%(detailsCounter.__str__(), jobDetailPageUrlsList.__len__().__str__())
	    for attribKey in jobAttribsDict.keys():
		jobAttribsDict[attribKey] = jobAttribsDict[attribKey].decode("ascii", "ignore")
		jobAttribsDict[attribKey].strip()
		jobAttribsDict[attribKey] = re.sub(self.__class__.multipleWhiteSpacesPattern, " ", jobAttribsDict[attribKey])
		jobAttribsDict[attribKey] = re.sub(self.__class__.newlinePattern, " ", jobAttribsDict[attribKey])
	    jobsDict[jobDetailsPageUrl] = Job(jobAttribsDict)
	    detailsCounter += 1
	self.jobsByIndustry[industry] = jobsDict
	return(jobsDict)


# Driver implementation
def main(cfgPath="./conf/jobscraper.cfg"):
    tjObj = TimesJobsScraper(cfgPath)
    log = None
    try:
    	log = Logger(tjObj.logPath + os.path.sep + sys.argv[0][0:-2] + "log")
    except:
	print "Error creating log object: " + sys.exc_info()[1].__str__() + "\n"
    pageContent = tjObj.doLogin('testusertimesjobs@yahoo.com', 'somesecret')
    searchPageUrl = "http://www.timesjobs.com/candidate/job-search.html"
    pageContent = tjObj.getSearchInterfacePage(searchPageUrl)
    industriesDict = tjObj.getJobListingUrlsByIndustry()
    industryJobCountsDict = tjObj.getIndustryWiseJobCounts()
    log.write("Industrywise Job Counts:\n===================================\n\n")
    for indName in industryJobCountsDict.keys():
	log.write("'%s' :  %s\n"%(indName, industryJobCountsDict[indName].__str__()))
    log.write("Done with job counts...\nPreparing to dump data...\n")
    industryNamesList = industriesDict.keys()
    industryNamesList.sort() # Sorting helps run tests quickly as industries with names starting with 'A' have fewer jobs listed.
    for indName in industryNamesList:
	print "Preparing to handle '%s' industry...\n"%indName
	if log is not None:
	    log.write("Preparing to handle '%s' industry...\n"%indName)
	tjObj.jobsByIndustry[indName] = tjObj.getJobsDict(indName, log)
	indFilename = "./testdumps/TimesJobsIndustries/" + JobScraper.JobScraper.generateContextualFilename(indName, "csv")
	fh = open(indFilename, "w")
    	Job.dumpHeader(fh, "csv")
    	# Iterate over job objects in tjObj.jobsByIndustry[indName] to dump the collected info as CSV.
	for joburl in tjObj.jobsByIndustry[indName].keys():
	    jobObj = tjObj.jobsByIndustry[indName][joburl]
	    jobObj.dumpData(fh, "csv")
	fh.close()
	if log is not None:
	    log.write("Done with '%s' industry\n=========================================\n"%indName)
	print "Completed '%s' industry jobs\n=========================================\n"%indName
    log.write("\nSuccessfully dumped data for all industries...\n Terminating session.\n\n")
    



# Entry point
if __name__ == "__main__":
    main()

