import os, sys, re, time, gzip
import simplejson as json
import urllib, urllib2, httplib
from BeautifulSoup import BeautifulSoup
from urlparse import urlparse, urlsplit
import StringIO
import mimetypes, mimetools
from ConfigParser import ConfigParser
import JobScraper
from LogHandler import Logger
from Jobs import Job



class NaukriScraper(JobScraper.JobScraper):
    """Scrapes job listings and job details from naukri.com.

    Drives a logged-in browsing session (login, redirect chain, cookie
    bookkeeping) and extracts industries, per-industry job listing pages
    and per-job details into 'Job' objects (see Jobs.py).
    """
    # When True, diagnostics (e.g. received cookies) are printed to stdout.
    DEBUG = True
    # Website specific regexes:
    # Matches a 'username' that looks like an email id (used by doLogin).
    emailIdPattern = re.compile(r"^[\w\-]+\@[\w\-]+\.[\w]+")
    # Extracts the target URL from a meta-refresh tag's 'content' attribute.
    metaContentPattern = re.compile("0;\s+url=\'([^\']+)\'", re.IGNORECASE | re.MULTILINE | re.DOTALL)
    # Matches an opening square bracket followed by word characters.
    openSqrBrckPattern = re.compile(r"\[\w+")
    # Extracts the jobs count rendered as "<em>(NNN)</em>" next to an industry name.
    jobCountPattern = re.compile("<em>\s*\((\d+)\)\s*<\/em>", re.IGNORECASE | re.MULTILINE | re.DOTALL)
    # Extracts the industry name text preceding the "<em>" count marker.
    industryNamePattern = re.compile("^\s*([^<]+)\s*<em>", re.IGNORECASE | re.MULTILINE | re.DOTALL)
    # Patterns used in parsing job details:
    keySkillsPattern = re.compile("Keyskills:", re.MULTILINE | re.DOTALL)
    rolePattern = re.compile("Role:", re.MULTILINE | re.DOTALL | re.IGNORECASE)
    functionPattern = re.compile(r"Functional\s+Area:", re.MULTILINE | re.DOTALL | re.IGNORECASE)
    emailAddressContactPattern = re.compile(r"Email\s+Address:", re.MULTILINE | re.DOTALL | re.IGNORECASE)
    contactDetailsLiteralPattern = re.compile("Contact\s+Details", re.IGNORECASE)
    # Collapses any run of whitespace (including newlines) to a single space.
    multipleSpacesPattern = re.compile(r"[\s\n]+")
    def __init__(self, cfgFile):
        # The base class wires up the config, the site base URL and the
        # login page URL; it also initializes self.httpHeaders.
        super(NaukriScraper, self).__init__(cfgFile, "http://www.naukri.com/", "https://login.naukri.com/nLogin/Login.php")
        self.httpHeaders['Referer'] = self.websiteUrl
        # 'jobsByIndustry' maps industry name -> {job details page URL -> Job}.
        # (See Jobs.py for the attributes a 'Job' object carries: 'JobTitle',
        # 'CompanyName', 'JobDescription', 'Locations', 'Experience',
        # 'KeySkills', 'Function', 'Role', 'PostedOn', 'ContactDetails',
        # 'JobUrl'.)
        self.jobsByIndustry = {}
        # Industry name -> number of jobs advertised under it.
        self.industryWiseCounts = {}
        # Industry name -> URL of the first jobs-listing page for it.
        self.industryLinks = {}
        self.requestId = ""
    # Method to login into a account (as job seeker) on naukri.com
    # The account credentials are passed as arguments to the method.
    # Both arguments ('username' and 'password') are mandatory. 
    # The return value is the HTML content of the page that appears
    # immediately after logging in. The same content is also stored
    # and accessible as '<object>.currentPageContent'. 
    # Note: the value of the 'username' parameter may be an email Id.
    def doLogin(self, username, password):
	formData = self._getLoginFormElementsDict()
	formData["USERNAME"] = username
	formData["PASSWORD"] = password
	if self.__class__.emailIdPattern.search(username):
	    formData["matchEmail"] = '1'
	else:
	    formData["matchEmail"] = '2'
	self.postData = urllib.urlencode(formData)
	httpHeaders = {}
	for hdrkey in self.httpHeaders.keys():
	    if hdrkey == 'Keep-Alive':
		continue
	    httpHeaders[hdrkey] = self.httpHeaders[hdrkey]
	httpHeaders['Cache-Control'] = "max-age=0"
	httpHeaders['Content-Type'] = "application/x-www-form-urlencoded"
	httpHeaders['Content-Length'] = len(self.postData)
	httpHeaders['Origin'] = "https://login.naukri.com"
	httpHeaders['Accept-Encoding'] = "gzip,deflate,sdch"
	httpHeaders['Accept-Charset'] = "ISO-8859-1,utf-8;q=0.7,*;q=0.3"
	self.pageRequest = urllib2.Request(self.requestUrl, self.postData, httpHeaders)
	try:
            self.pageResponse = self.no_redirect_opener.open(self.pageRequest)
	    self.sessionCookies = self.updateCookies(self.pageResponse)
	    if self.__class__.DEBUG:
	        print "Cookies Received: " + self.sessionCookies.__str__()
	    httpHeaders["Cookie"] = self.sessionCookies
        except:
            print "naukri.py: " + sys.exc_info()[1].__str__()
	# In this response we also expect a 'Location' header
	responseHeaders = self.pageResponse.info()
	if responseHeaders.getheader('Location'):
	    self.requestUrl = responseHeaders.getheader('Location')
	else:
	    sys.exit()
	# Delete the keys 'Origin', 'Content-Length' and 'Content-Type' from httpHeaders
	del httpHeaders['Origin']
	del httpHeaders['Content-Length']
	del httpHeaders['Content-Type']
	for hkey in self.httpHeaders.keys():
	    if httpHeaders.has_key(hkey):
	    	self.httpHeaders[hkey] = httpHeaders[hkey] # Set the value for the key in 'self.httpHeaders'...
		del httpHeaders[hkey] # ...and now delete this key from 'httpHeaders'
	    else:
		del self.httpHeaders[hkey] # Delete all keys that are not present in 'httpHeaders'
	# Now there might be some keys in 'httpHeaders' that have not been transferred to 'self.httpHeaders'. Set them for 'self.httpHeaders'
	for hkey in httpHeaders.keys():
	    self.httpHeaders[hkey] = httpHeaders[hkey]
	    del httpHeaders[hkey]
	self.pageRequest = urllib2.Request(self.requestUrl, None, self.httpHeaders)
	try:
            self.pageResponse = self.no_redirect_opener.open(self.pageRequest)
        except:
            print __file__.__str__() + ": Couldn't fetch page due to limited connectivity. Please check your internet connection and try again - " + sys.exc_info()[1].__str__()
        self.httpHeaders["Referer"] = self.requestUrl
	redirectUrl = None
        self.currentPageContent = self.__class__._decodeGzippedContent(self.getPageContent())
	redirect_soup = BeautifulSoup(self.currentPageContent)
	metaTag = redirect_soup.find("meta", {'http-equiv' : 'refresh'})
	if metaTag.has_key("content"):
	    metaContent = metaTag['content']
	    metaSearch = self.__class__.metaContentPattern.search(metaContent)
	    if metaSearch:
		redirectUrl = metaSearch.groups()[0]
	if redirectUrl is not None:
	    self.requestUrl = redirectUrl
	else:
	    print "Could not fetch data from the first redirection to %s"%(self.requestUrl)
	    return(None)
	self.pageRequest = urllib2.Request(self.requestUrl, None, self.httpHeaders)
	try:
            self.pageResponse = self.no_redirect_opener.open(self.pageRequest)
	    self.httpHeaders["Referer"] = self.requestUrl
	    responseHeaders = self.pageResponse.info()
	    if responseHeaders.getheader('Location'):
	    	self.requestUrl = responseHeaders.getheader('Location')
	    else:
		self.requestUrl = None
	    self.sessionCookies = self.updateCookies(self.pageResponse)
	    self.httpHeaders['Cookie'] = self.sessionCookies
	except:
            print __file__.__str__() + ": Error while processing login request - " + sys.exc_info()[1].__str__()
        if self.requestUrl is None:
	    print "Some error occurred and the appropriate header was not received at the 3rd request.\n"
	    return None
	for hdrKey in self.httpHeaders.keys():
	    if hdrKey == "Cache-Control" or hdrKey == "Referer" or hdrKey == "Keep-Alive" or hdrKey == "Origin":
		del self.httpHeaders[hdrKey]
	# Add a '/' before the '?' in the request Url if there isn't one already.
	urlparts = self.requestUrl.split('?')
	self.requestUrl = urlparts[0] + '/?' + urlparts[1]
	self.pageRequest = urllib2.Request(self.requestUrl, None, self.httpHeaders)
	try:
            self.pageResponse = self.no_redirect_opener.open(self.pageRequest)
	    self.sessionCookies = self.updateCookies(self.pageResponse)
	    self.httpHeaders['Cookie'] = self.sessionCookies
        except:
            print __file__.__str__() + ": Error while processing login request - " + sys.exc_info()[1].__str__()
        self.httpHeaders["Referer"] = self.requestUrl 
	self.currentPageContent = self.__class__._decodeGzippedContent(self.getPageContent())
	return(self.currentPageContent)


    # Method to retrieve the form elements and their values
    # from the login form. This is supposed to be called from
    # 'doLogin'. Return value is a dictionary with the element
    # names as keys and their values (if available) as values.
    # This method also sets self.requestUrl and self.requestMethod
    # from the form's 'action' and 'method' attributes respectively.
    def _getLoginFormElementsDict(self):
        soup = None
        try:
            soup = BeautifulSoup(self.currentPageContent)
        except Exception:
            self.currentPageContent = self.__class__.sanitizePageHTML(self.currentPageContent)
            soup = BeautifulSoup(self.currentPageContent)
        loginForm = soup.find("form", {'name' : 'login' })
        formElementsDict = {}
        if loginForm is None:
            # Bug fix: the original crashed with AttributeError when the page
            # carried no login form. Fall back to the known login URL so the
            # caller can still attempt a POST.
            self.requestUrl = self.loginPageUrl + "?msg=0&URL=http%3A%2F%2Fmy.naukri.com"
            self.requestMethod = "post"
            return(formElementsDict)
        if not loginForm.has_key("action"):
            self.requestUrl = ""
        else:
            self.requestUrl = loginForm["action"]
        if self.requestUrl == "":
            self.requestUrl = self.loginPageUrl + "?msg=0&URL=http%3A%2F%2Fmy.naukri.com"
        if not self.__class__._isAbsoluteUrl(self.requestUrl):
            # Bug fix: URLs always use '/'; os.path.sep would produce a
            # backslash on Windows.
            self.requestUrl = self.baseUrl + "/" + self.requestUrl
        self.requestMethod = loginForm["method"]
        for inputTag in loginForm.findAll("input"):
            # Bug fix: skip nameless inputs up front (the original raised
            # KeyError for a nameless input that carried a 'value').
            if not inputTag.has_key("name"):
                continue
            inputName = inputTag["name"]
            if inputTag.has_key("type") and inputTag["type"] == "radio" and inputName == "matchEmail":
                formElementsDict["matchEmail"] = 2
            if inputName == "formSubmitted":
                formElementsDict[inputName] = '1'
            elif inputName == "URL":
                formElementsDict[inputName] = 'http%253A%252F%252Fmy.naukri.com'
            else:
                formElementsDict[inputName] = ""
                if inputTag.has_key("value"):
                    formElementsDict[inputName] = inputTag["value"]
        return(formElementsDict)


    # Merge the cookies from 'lastHttpResponse' into the cookies already
    # carried by the session (the 'Cookie' entry of self.httpHeaders).
    # Cookies from the response win over existing ones with the same name;
    # existing cookies whose value is the literal "deleted" are dropped.
    # Returns the merged cookies as a single "name=value;name=value;" string.
    def updateCookies(self, lastHttpResponse):
        cookies = ""
        cookiesDict = {}
        lastResponseHeaders = lastHttpResponse.info()
        responseCookies = lastResponseHeaders.getheaders("Set-Cookie")
        # Bug fix: the original tested '> 1' and so ignored the response
        # cookies entirely when exactly one 'Set-Cookie' header was present.
        if len(responseCookies) > 0:
            for cookie in responseCookies:
                cookieParts = cookie.split(";")
                # Split on the first '=' only: cookie values may contain '='
                # (e.g. base64 padding); a plain split crashed on unpacking.
                cookieName, cookieValue = cookieParts[0].split("=", 1)
                cookiesDict[cookieName.strip()] = cookieValue.strip()
        for sessCookie in self.httpHeaders['Cookie'].split(";"):
            sessCookie = sessCookie.strip()
            if sessCookie == "":
                continue
            cookieName, cookieValue = sessCookie.split("=", 1)
            cookieName = cookieName.strip()
            cookieValue = cookieValue.strip()
            if cookieValue == "deleted":
                continue
            if cookieName not in cookiesDict:
                cookiesDict[cookieName] = cookieValue
        for cookieName in cookiesDict.keys():
            cookies += cookieName + "=" + cookiesDict[cookieName] + ";"
        return(cookies)


    # This method accepts a URL as a parameter and attempts to navigate to the
    # page pointed to by that URL using HTTP GET. It supports redirections using
    # 'Location' headers (calls itself recursively if 'Location' header is present. 
    def navigatePage(self, pageUrl):
	self.requestUrl = pageUrl
	self.pageRequest = urllib2.Request(self.requestUrl, None, self.httpHeaders)
	try:
            self.pageResponse = self.no_redirect_opener.open(self.pageRequest)
	    self.sessionCookies = self.updateCookies(self.pageResponse)
	    self.httpHeaders['Cookie'] = self.sessionCookies
        except:
            print __file__.__str__() + ": Error while processing GET request to " + pageUrl + " - " + sys.exc_info()[1].__str__()
	    return (None)
        self.httpHeaders["Referer"] = self.requestUrl
	self.currentPageContent = self.__class__._decodeGzippedContent(self.getPageContent())
	responseHeaders = self.pageResponse.info()
	if responseHeaders.getheader('Location'):
	    self.requestUrl = responseHeaders.getheader('Location')
	else:
	    return (self.currentPageContent)
	self.currentPageContent = self.navigatePage(self.requestUrl)
	return(self.currentPageContent)


    # This method is similar to 'navigatePage()', but it doesn't set the caller's object
    # attributes. So, after calling this method you will still have your object's 
    # 'requestUrl', 'pageRequest', 'pageResponse', 'httpHeaders', 'sessionCookies' and
    # 'currentPageContent' attributes unchanged. 'navigatePage()' changes them all, but
    # fetchPage doesn't. It simply fetches the page pointed to by the URL ('pageUrl')
    # passed to it using the state of the caller object, and returns the HTML (text)
    # content of the page. However, it does handle redirection like 'navigatePage()'.
    def fetchPage(self, pageUrl):
	httpRequest = urllib2.Request(pageUrl, None, self.httpHeaders)
	pageContent = None
	nextUrl = None
	try:
	    httpResponse = self.no_redirect_opener.open(httpRequest)
	    responseHeaders = httpResponse.info()
	    if responseHeaders.getheader('Location'):
	    	nextUrl = responseHeaders.getheader('Location')
	    else:
		pageContent = self.__class__._decodeGzippedContent(httpResponse.read())
	    	return (pageContent)
	except:
            print __file__.__str__() + ": Error while processing GET request to " + pageUrl + " - " + sys.exc_info()[1].__str__()
	    return (None)
	if nextUrl is not None:
	    pageContent = self.fetchPage(nextUrl)
	pageContent = pageContent.decode("ascii", "ignore")
	return pageContent

    # This method creates a dictionary containing the names of all industries retrieved from the
    # page listing the industries. The names of the industries are the keys of the dictionary and
    # the URL to the starting page of job listing for each of them are the values. Additionally, it
    # also creates another dictionary containing the count of jobs posted for each of the listed
    # industries. For this dictionary too, the names of the industries are the keys and the counts
    # of jobs under them are the values. The method returns the first dictionary (industry names
    # to job listing URLs) and sets the value of the object attribute 'industryWiseCounts' to
    # the second dictionary. (self.industryWiseCounts is set)
    def _extractIndustriesLinks(self):
	self.industryLinks = {}
	pageContent = self.currentPageContent
	soup = None
	try:
	    soup = BeautifulSoup(pageContent)
	except:
	    pageContent = self.__class__.sanitizePageHTML(pageContent)
	    soup = BeautifulSoup(pageContent)
	indDivTag = soup.find("div", {'id' : 'indDiv'})
	if not indDivTag:
	    print "Could not find the 'div' for the industries list. Possibly fetched a wrong page.\n"
	    return None
	allIndAnchors = indDivTag.findAll("a")
	for anchor in allIndAnchors:
	    if anchor is not None and anchor.has_key("href"):
	    	indUrl = anchor["href"]
	    	indContents = anchor.renderContents()
		industryNameSearch = self.__class__.industryNamePattern.search(indContents)
		indName = None
		if industryNameSearch:
		    indName = industryNameSearch.groups()[0]
		    indName = indName.strip()
		if indName is None:
		    continue
		jobCountSearch = self.__class__.jobCountPattern.search(indContents)
		jobCount = None
		if jobCountSearch:
		    jobCount = jobCountSearch.groups()[0]
		self.industryLinks[indName] = indUrl
		self.industryWiseCounts[indName] = jobCount
	    else:
		continue # Basically do nothing.
	return(self.industryLinks)


    # Public wrapper around '_extractIndustriesLinks' so that external code
    # such as 'main()' need not call the private method directly.
    def getJobListingUrlsByIndustry(self):
        # Populates self.industryLinks / self.industryWiseCounts as side effects.
        self._extractIndustriesLinks()
        return self.industryLinks

    # Build the URL of the next jobs-listing page from the URL of the current
    # one by replacing the previous page's "px=<n>" marker (or appending one
    # when it is absent). Should only be called while iterating the job
    # listing pages of a specific industry. Returns None when the previous
    # page marker occurs more than once in the URL.
    def getNextPageUrl(self, startPageUrl, pageNum):
        prevMarker = "px=" + str(pageNum - 1)
        pieces = startPageUrl.split(prevMarker)
        if len(pieces) == 2:
            return pieces[0] + "px=" + str(pageNum) + pieces[1]
        if len(pieces) == 1:
            return pieces[0] + "&px=" + str(pageNum)
        return None


    # Thin convenience wrapper: fetch the contents of one job details page.
    def _getJobDetailsPage(self, detailsUrl):
        return self.fetchPage(detailsUrl)


    # Parse one jobs-listing page and collect the URL of every job details
    # page linked from it (anchors carrying class 'l_j').
    # Returns the list of URLs (possibly empty).
    def _getJobLinksFromPage(self, pageContent, logObj=None):
        try:
            soup = BeautifulSoup(pageContent)
        except:
            if logObj is not None:
                logObj.write("Page content was insane... Sanitizing it\n")
            pageContent = self.__class__.sanitizePageHTML(pageContent)
            soup = BeautifulSoup(pageContent)
        anchors = soup.findAll("a", {'class' : 'l_j'})
        return [anchor['href'] for anchor in anchors if anchor.has_key("href")]


    # This method parses a job details page and returns a dictionary whose
    # keys are the attributes supported by the 'Job' class: 'JobTitle',
    # 'CompanyName', 'Locations', 'Experience', 'PostedOn', 'KeySkills',
    # 'Function', 'Role', 'ContactDetails', 'JobDescription' and 'JobUrl'.
    # An empty dict is returned when 'pageContent' is empty or None.
    def parseJobDetailsPage(self, pageContent):
        jobAttribs = {}
        if not pageContent or pageContent == "":
            return jobAttribs
        cls = self.__class__
        # Small local helpers (the original repeated these operations inline).
        # NOTE(review): 'htmlTagPattern' is not defined in this class -
        # presumably inherited from JobScraper; confirm.
        collapseWs = lambda text: re.sub(cls.multipleSpacesPattern, " ", text)
        stripTags = lambda text: re.sub(cls.htmlTagPattern, "", text)
        byPrefixPattern = re.compile(r"^\s*by\s+") # hoisted: was recompiled inline

        def firstParagraphText(divTag):
            # Whitespace-collapsed contents of the first <p> under 'divTag', or None.
            para = divTag.find("p")
            if para is None:
                return None
            return collapseWs(para.renderContents())

        try:
            soup = BeautifulSoup(pageContent)
        except Exception:
            # naukri pages sometimes carry malformed markup; clean it up first.
            pageContent = self._handleCustomShitInHTML(pageContent)
            pageContent = pageContent.decode("ascii", "ignore")
            pageContent = cls.sanitizePageHTML(pageContent)
            soup = BeautifulSoup(pageContent)
        # --- Job title ---
        jobTitleTag = soup.find("strong", {'class' : 'jobTitle'})
        try:
            jobAttribs['JobTitle'] = jobTitleTag.renderContents().strip()
        except AttributeError: # tag absent -> jobTitleTag is None
            jobAttribs['JobTitle'] = ""
        jobAttribs['JobTitle'] = collapseWs(jobAttribs['JobTitle'])
        # --- Company name and locations ("by <company> in <loc> in <loc>...") ---
        jobDetailsTag = soup.find("div", {'class' : 'jobDet'})
        try:
            companyNameText = jobDetailsTag.renderContents()
        except AttributeError:
            companyNameText = ""
        companyNameText = stripTags(companyNameText)
        companyNameText = re.sub(byPrefixPattern, "", companyNameText)
        companyNameTextParts = companyNameText.split(" in ")
        jobAttribs['CompanyName'] = collapseWs(companyNameTextParts[0])
        jobAttribs['Locations'] = ""
        if len(companyNameTextParts) == 2:
            jobAttribs['Locations'] = companyNameTextParts[1]
        elif len(companyNameTextParts) > 2:
            jobAttribs['Locations'] = " in ".join(companyNameTextParts[1:])
        jobAttribs['Locations'] = collapseWs(jobAttribs['Locations'])
        # --- Experience and posting date ---
        jobExpDateTag = soup.find("div", {'class' : 'f13'})
        try:
            jobExpDateTagContent = jobExpDateTag.renderContents()
        except AttributeError:
            jobExpDateTagContent = ""
        jobExpDateTagContent = stripTags(jobExpDateTagContent).strip()
        experiencePostedPattern = re.compile(r"Experience:\s*(.*)Posted on:\s*(.*)\s*$")
        experiencePostedSearch = experiencePostedPattern.search(jobExpDateTagContent)
        experience = ""
        postedOn = ""
        if experiencePostedSearch:
            experience, postedOn = experiencePostedSearch.groups()
        jobAttribs['Experience'] = collapseWs(experience)
        jobAttribs['PostedOn'] = collapseWs(postedOn)
        # --- Key skills / role / functional area / contact email ---
        jobAttribs['KeySkills'] = ""
        jobAttribs['Function'] = ""
        jobAttribs['Role'] = ""
        jobAttribs['ContactDetails'] = ""
        for jdDiv in soup.findAll("div", {'class' : 'jdRow'}):
            jdDivContents = ""
            if jdDiv is not None:
                jdDivContents = jdDiv.renderContents()
            if cls.keySkillsPattern.search(jdDivContents):
                paraText = firstParagraphText(jdDiv)
                if paraText is not None:
                    jobAttribs['KeySkills'] = paraText
            if cls.rolePattern.search(jdDivContents):
                paraText = firstParagraphText(jdDiv)
                if paraText is not None:
                    jobAttribs['Role'] = paraText
            if cls.functionPattern.search(jdDivContents):
                paraText = firstParagraphText(jdDiv)
                if paraText is not None:
                    jobAttribs['Function'] = paraText
            if cls.emailAddressContactPattern.search(jdDivContents):
                anchorTag = jdDiv.find("a")
                if anchorTag is not None:
                    jobAttribs['ContactDetails'] = anchorTag.renderContents()
        # --- Contact details block (appended after any contact email above) ---
        contactDetailsDiv = soup.find("div", {'class' : 'contactInfo'})
        try:
            contactDetailsContent = contactDetailsDiv.renderContents()
        except AttributeError:
            contactDetailsContent = ""
        contactDetailsContent = re.sub(cls.contactDetailsLiteralPattern, "", contactDetailsContent)
        contactDetailsContent = stripTags(contactDetailsContent).strip()
        if jobAttribs['ContactDetails'] != "":
            jobAttribs['ContactDetails'] += ", " + contactDetailsContent
        else:
            jobAttribs['ContactDetails'] = contactDetailsContent
        jobAttribs['ContactDetails'] = collapseWs(jobAttribs['ContactDetails'])
        # --- Job description (concatenation of all 'jdDesc' divs) ---
        jobAttribs['JobDescription'] = ""
        for jobDescDiv in soup.findAll("div", {'class' : 'jdDesc'}):
            if not jobDescDiv:
                continue
            jobAttribs['JobDescription'] += stripTags(jobDescDiv.renderContents()).strip()
        jobAttribs['JobDescription'] = collapseWs(jobAttribs['JobDescription'])
        # --- Canonical job URL ---
        jobAttribs['JobUrl'] = ""
        jobUrlAnchor = soup.find("a", {'title' : 'Email this Job'})
        if jobUrlAnchor and jobUrlAnchor.has_key("href"):
            jobAttribs['JobUrl'] = jobUrlAnchor["href"]
        return(jobAttribs)



    # Normalize the malformed constructs naukri's HTML is known to contain,
    # which make BeautifulSoup fail to recognize parts of the page as HTML:
    # backslash-escaped quotes (\' \"), tags ending in "\>" and the misspelt
    # attribute 'class-"srtBdr"'. This may need to change whenever naukri
    # changes their markup.
    # Note: Please don't remove the existing clean-up operations when adding
    # more such operations.
    def _handleCustomShitInHTML(self, htmlContent):
        shitEscapePattern = re.compile(r"\\[\'\"]", re.MULTILINE | re.DOTALL)
        shitTagEndPattern = re.compile(r"\\>", re.MULTILINE | re.DOTALL)
        htmlContent = re.sub(shitEscapePattern, "\"", htmlContent)
        htmlContent = re.sub(shitTagEndPattern, "/>", htmlContent)
        htmlContent = re.sub(r"class-\"srtBdr\"", 'class="srtBdr"', htmlContent)
        # Best-effort debug dump. Bug fix: the original wrote unconditionally
        # to this hard-coded absolute path and crashed with IOError on any
        # machine where the directory does not exist.
        if self.__class__.DEBUG:
            try:
                f = open("/home/supmit/work/odesk/SalesForceCVSearch/testdumps/naukriPageDump.html", "w")
                f.write(htmlContent)
                f.close()
            except (IOError, OSError):
                pass
        return(htmlContent)

    # This method extracts all jobs info for a specified
    # industry (passed as an argument). It handles extraction
    # of jobs info from multiple pages of listing, gets the
    # jobs details by extracting info from the job details 
    # page and creates a 'Job' object for each job processed.
    # It returns the info as a dictionary, whose keys are the
    # URLs to the job details pages, and the values are the
    # 'Job' objects created using the info for each job. It 
    # gets the URL to the first page of the job listing for 
    # the specified industry by using the 'industryLinks'
    # object attribute populated by the call to 'getJobListingUrlsByIndustry'.
    def getJobsDict(self, industry, logObj=None):
	print "Processing industry '%s'..."%industry
	jobsDict = {}
	if logObj is not None:
	    logObj.write("Processing industry '%s\n'..."%industry)
	industryUrl = self.industryLinks[industry]
	startPageUrl = industryUrl
	pageNum = 1
	# Get the first page of job listing for the particular industry we are handling in this iteration.
	jobsPageContent = self.fetchPage(industryUrl)
	jobsPageContent = self._handleCustomShitInHTML(jobsPageContent)
	allJobUrlsList = []
	errorDivPattern = re.compile(r"disp\s+fl\s+searchHd")
	errorPagePattern = re.compile(r"(\d+)\-(\d+)\s+of\s+<strong>\d+<\/strong>")
	lastEndRange = 50 # Assuming we are displayed 50 job entries per page (which is the default view)
	while jobsPageContent is not None:
	    jobUrlsList = self._getJobLinksFromPage(jobsPageContent)
	    if jobUrlsList.__len__() > 0:
		if logObj is not None:
		    logObj.write("Found %s jobs from page #%s ... \n"%(jobUrlsList.__len__().__str__(), pageNum.__str__()))
		allJobUrlsList.extend(jobUrlsList)
	    pageNum += 1
	    nextPageUrl = self.getNextPageUrl(startPageUrl, pageNum)
	    if nextPageUrl and self.__class__.DEBUG:
		print "Next page URL: %s\n"%nextPageUrl
	    logObj.write("Processing page: %s\n"%pageNum.__str__())
	    jobsPageContent = None
	    if nextPageUrl is not None:
		jobsPageContent = self.fetchPage(nextPageUrl)
	    soup = None
	    try:
		soup = BeautifulSoup(jobsPageContent)
	    except:
		logObj.write("Found insane HTML content at page # %s: %s\n"%(pageNum.__str__(), sys.exc_info()[1].__str__()))
		jobsPageContent = self._handleCustomShitInHTML(jobsPageContent)
		soup = BeautifulSoup(jobsPageContent)
	    jobCountDiv = soup.find("div", {'class' : 'disp fl searchHd'})
	    if not jobCountDiv: # Could not identify the range of results displayed on this page... break out of while loop
		logObj.write("Warning: Could not identify the range of results displayed on this page.\nTerminating jobs listing page iteration\n")
		print "Warning: Could not identify the range of results displayed on this page.\nTerminating jobs listing page iteration"
		break
	    jobCountSearch = errorPagePattern.search(jobCountDiv.renderContents())
	    if not jobCountSearch:
		logObj.write("Warning: Could not identify the range of results displayed on this page.\nTerminating jobs listing page iteration\n")
		print "Warning: Could not identify the range of results displayed on this page.\nTerminating jobs listing page iteration"
		break
	    startRange = jobCountSearch.groups()[0]
	    endRange = jobCountSearch.groups()[1]
	    print "Start Range : %s, End Range: %s\n"%(startRange, endRange)
	    if lastEndRange == int(endRange): # We haven't been able to navigate to the next page... something's wrong
		logObj.write("Couldn't get to the next page... last page also listed jobs upto %s\n"%(endRange))
		print "Couldn't get to the next page... last page also listed jobs upto %s\n"%(endRange)
		break
	    else:
		lastEndRange = int(endRange)
	    if int(startRange) > int(endRange): # Gone past all the jobs listed for the industry - set loop terminating condition.
		jobsPageContent = None
	# At this point we have URLs to the job detail pages for all the jobs listed for this industry in the variable 'allJobUrlsList'.
	# We will now iterate over each element of 'allJobUrlsList' and fetch the details of each job.
	# TODO: This part needs to be processed by multiple threads.
	for jobDetailsPageUrl in allJobUrlsList:
	    jobDetailsPageContent = self._getJobDetailsPage(jobDetailsPageUrl)
	    if logObj is not None:
		logObj.write("Parsing job details from page '%s'...\n"%jobDetailsPageUrl)
	    jobAttribsDict = self.parseJobDetailsPage(jobDetailsPageContent)
	    for attribKey in jobAttribsDict.keys():
		jobAttribsDict[attribKey] = jobAttribsDict[attribKey].decode("ascii", "ignore")
		jobAttribsDict[attribKey].strip()
	    jobsDict[jobDetailsPageUrl] = Job(jobAttribsDict)
	return(jobsDict)




# By default, this script will run for all industries listed in naukri.com.
# TODO: Provide means to run the script for one or more industries specified by the user.
def main(cfgPath="./conf/jobscraper.cfg"):
    naukriObj = NaukriScraper(cfgPath)
    log = None
    try:
    	log = Logger(naukriObj.logPath + os.path.sep + sys.argv[0][0:-2] + "log")
    except:
	print "Error creating log object: " + sys.exc_info()[1].__str__() + "\n"
    userId = "testusernaukri@yahoo.com"
    passwd = "somesecret"
    log.write("\nAttempting to login into naukri.com with credentials '%s/%s'\n"%(userId, passwd))
    naukriObj.doLogin(userId, passwd)
    if naukriObj.assertLogin("Logout"):
	print "Successfully logged in as %s\n"%userId
	log.write("Successfully logged in as %s\n"%userId)
    else:
	print "Could not log in as %s\n"%userId
	log.write("Could not log in as %s\n"%userId)
    log.write("Trying to navigate to industries listing page...\n")
    industriesListingPageUrl = "http://jobsearch.naukri.com/mynaukri/mn_newsmartsearch.php?xz=22_0_1"
    pageContent = naukriObj.navigatePage(industriesListingPageUrl)
    # So at this point, naukriObj.currentPageContent contains the page that lists all the industries. 
    # So, we now fetch the list of industries as well as the URLs to the pages where all jobs pertaining
    # to each industry are listed.
    log.write("Retrieved industries listing page\nTrying to extract industry names and URLs... ")
    industryUrlsDict = naukriObj.getJobListingUrlsByIndustry()
    log.write("got it\nStarting to process industries one by one...\n")
    # TODO: May consider threading out from this point on since there are lots of industries. Each thread
    # may handle a single industry, so that we may process multiple industries simultaneously. 
    for industryName in industryUrlsDict.keys():
	industryUrl = industryUrlsDict[industryName]
	log.write("Processing '%s' with %s jobs... "%(industryName, naukriObj.industryWiseCounts[industryName]))
	# Retrieve all the jobs info for industry.
	jobsDict = naukriObj.getJobsDict(industryName, log)
	naukriObj.jobsByIndustry[industryName] = jobsDict # Not very useful as we are going to dump the results here.
	indFilename = "./testdumps/NaukriIndustries/" + JobScraper.JobScraper.generateContextualFilename(industryName, "csv")
	fh = open(indFilename, "w")
    	Job.dumpHeader(fh, "csv")
	for jurl in jobsDict.keys():
	    jobObj = jobsDict[jurl]
	    jobObj.dumpData(fh, "csv")
	fh.close()
	if log is not None:
	    log.write("Done with '%s' industry\n=========================================\n"%industryName)
	print "Completed '%s' industry jobs\n=========================================\n"%industryName
	



# Script entry point: scrape every industry using the default config path.
if __name__ == "__main__":
    main()

