import os, sys, re, time, gzip
import simplejson as json
import urllib, urllib2, httplib
from BeautifulSoup import BeautifulSoup
from urlparse import urlparse, urlsplit
import StringIO
import mimetypes, mimetools
from ConfigParser import ConfigParser
import JobScraper
from Jobs import Job
from LogHandler import Logger
#from Spidermonkey import Runtime



class DiceScraper(JobScraper.JobScraper):
    DEBUG = True
    daysOfWeek = {'0' : 'Sunday', '1' : 'Monday', '2' : 'Tuesday', '3' : 'Wednesday', '4' : 'Thursday', '5' : 'Friday', '6' : 'Saturday'}
    def __init__(self, cfgFile):
	super(DiceScraper, self).__init__(cfgFile, "http://www.dice.com/", "http://seeker.dice.com/profman/servlet/ProfMan?op=3000&pg=1000")
	self.httpHeaders['Referer'] = self.websiteUrl
	self.searchPageContentsDict = {}
	self.jobsByCountry = {}

    def _getCookieFromResponse(cls, lastHttpResponse):
        cookies = ""
	#print "get cookie is dice.py"
        lastResponseHeaders = lastHttpResponse.info()
        responseCookies = lastResponseHeaders.getheaders("Set-Cookie")
        if responseCookies.__len__() > 1:
            for cookie in responseCookies:
                cookieParts = cookie.split(";")
                cookies += cookieParts[0] + "; "
                #print "get cookie in dice.py" + cookieParts[0]
            return(cookies)
	else:
	    cookies = super(DiceScraper, cls)._getCookieFromResponse(lastHttpResponse)
	    return(cookies)
    
    _getCookieFromResponse = classmethod(_getCookieFromResponse)

    #s_cc=true; s_evar16=1%3A00PM; s_evar17=Tuesday; s_evar18=Weekday; s_nr=1337109812853; s_evar19=New; s_lastvisit=1337109812855
    #s_evar16=2%3A00PM; s_evar17=Wednesday; s_evar18=Weekday; s_nr=1339014291060; s_evar19=New; s_lastvisit=1339014291062
    def _createAllNecessaryCookies(self):
	current_time = time.localtime()
	hour = current_time[3]
	minute = current_time[4]
	suffix = "AM"
	if hour > 12:
	    hour = 24 - hour
	    suffix = "PM"
	s_evar16 = "s_evar16=" + hour.__str__() + "%3A" + minute.__str__() + suffix
	s_evar17 = "s_evar17=" + self.__class__.daysOfWeek[current_time[6].__str__()]
	s_evar18 = "s_evar18=Weekday"
	if current_time[6] == 0 or current_time[6] == 6:
	    s_evar18 = "s_evar18=Weekend"
	s_cc = "s_cc=true"
	s_evar19 = "s_evar19=New"
	s_nr = "s_nr=" + int(time.time() * 1000).__str__()
	s_lastvisit = "s_lastvisit=" + (int(time.time() * 1000) + 2).__str__()
	cookieString = s_cc + "; " + s_evar16 + "; " + s_evar17 + "; " + s_evar18 + "; " + s_nr + "; " + s_evar19 + "; " + s_lastvisit
	# Next, make a request for the Geo-IP info
	geoip_cookie = ""
	ts = int(time.time())
	geoip_url = "http://gs.instantservice.com/geoipAPI.js?src=ii3&ts=" + ts.__str__()
	geoip_response = urllib2.urlopen(geoip_url)
	geoip_content = geoip_response.read()
	geoip_ipaddr_pattern = re.compile(r"isgeoipapi_ip_addr\s+=\s+\"([\d\.]+)\";")
	geoip_ipaddr_search = geoip_ipaddr_pattern.search(geoip_content)
	if geoip_ipaddr_search:
	    geoip_cookie += "_GeoIP-" + geoip_ipaddr_search.groups()[0]
	else:
	    geoip_cookie += "_GeoIP-"
	geoip_country_pattern = re.compile(r"isgeoipapi_country_code\s+=\s+\"(\w{2})\";")
	geoip_country_search = geoip_country_pattern.search(geoip_content)
	if geoip_country_search:
	    geoip_cookie += "_GeoCo-" + geoip_country_search.groups()[0]
	else:
	    geoip_cookie += "_GeoCo-"
	geoip_region_pattern = re.compile(r"isgeoipapi_region\s+=\s+\"(\d{2})\";")
	geoip_region_search = geoip_region_pattern.search(geoip_content)
	if geoip_region_search:
	    geoip_cookie += "_GeoRg-" + geoip_region_search.groups()[0]
	else:
	    geoip_cookie += "_GeoRg-16" # Default case: 16
	geoip_city_pattern = re.compile(r"isgeoipapi_city\s+=\s+\"([\w\s]+)\";")
	geoip_city_search = geoip_city_pattern.search(geoip_content)
	if geoip_city_search:
	    geoip_cookie += "_GeoCt-" + geoip_city_search.groups()[0]
	else:
	    geoip_cookie += "_GeoCt-Mumbai" # Default case: Mumbai 
	geoip_cookie += "_GeoNs-_GeoDm-"
	t_int = int(time.time()).__str__()
	geoip_cookie = "IS3_GSV=DPL-2_TES-" + t_int + "_PCT-" + t_int + "s_vi=[CS]v1|280A1ED70501082F-40000130A01DBB54[CE]; IS3_History=1343452288-2-97____;" + geoip_cookie + "; "
	cookieString = geoip_cookie + cookieString
	return(cookieString)
    
    def doLogin(self, username, password):
	formData = self._getLoginFormElementsDict()
	if not self.siteUsername or not self.sitePassword:
	    self.siteUsername = username
	    self.sitePassword = password
	formData["SJT_USER_NAME"] = self.siteUsername
	formData["SJT_PASSWD"] = self.sitePassword
	self.postData = urllib.urlencode(formData)
	self.httpHeaders['Cookie'] += self._createAllNecessaryCookies()
	httpHeaders = {}
	for hdrkey in self.httpHeaders.keys():
	    if hdrkey == 'Keep-Alive':
		continue
	    httpHeaders[hdrkey] = self.httpHeaders[hdrkey]
	httpHeaders['Content-Type'] = 'application/x-www-form-urlencoded'
	httpHeaders['Accept-Language'] = 'en-US,en;q=0.8'
	httpHeaders['Accept-Encoding'] = 'gzip,deflate,sdch'
	httpHeaders['Accept-Charset'] = "ISO-8859-1,utf-8;q=0.7,*;q=0.3"
	httpHeaders['Content-Length'] = len(self.postData)
	httpHeaders['Referer'] = self.loginPageUrl
	httpHeaders['Origin'] = "http://seeker.dice.com"
	httpHeaders['Cache-Control'] = "max-age=0"
	if self.__class__.DEBUG:
	    print "Request URL: " + self.requestUrl
	    print "POST Data: " + self.postData
	    print "HTTP Headers: " +  httpHeaders.__str__()
	self.pageRequest = urllib2.Request(self.requestUrl, self.postData, httpHeaders)
	try:
            self.pageResponse = self.no_redirect_opener.open(self.pageRequest)
            self.sessionCookies = self.__class__._getCookieFromResponse(self.pageResponse)
            self.httpHeaders["Cookie"] += self.sessionCookies
	    httpHeaders["Cookie"] += self.sessionCookies
	    if self.__class__.DEBUG:
	        print "Cookies Received: " + self.sessionCookies.__str__()
        except:
            print "dice.py: Couldn't fetch page due to limited connectivity. Please check your internet connection and try again - " + sys.exc_info()[1].__str__()
	# In this response we also expect a 'Location' header
	responseHeaders = self.pageResponse.info()
	if responseHeaders.getheader('Location'):
	    self.requestUrl = responseHeaders.getheader('Location')
	else:
	    sys.exit()
	# Delete the keys 'Origin', 'Content-Length' and 'Content-Type' from httpHeaders
	del httpHeaders['Origin']
	del httpHeaders['Content-Length']
	del httpHeaders['Content-Type']
	for hkey in httpHeaders.keys():
	    self.httpHeaders[hkey] = httpHeaders[hkey]
	if self.__class__.DEBUG:
	    print responseHeaders
	    print "Redirect URL: " + self.requestUrl + "\n"
	    print "Redirect HTTP Headers: " +  self.httpHeaders.__str__() + "\n"
	self.pageRequest = urllib2.Request(self.requestUrl, None, self.httpHeaders)
	try:
            self.pageResponse = self.no_redirect_opener.open(self.pageRequest)
            self.sessionCookies = self.__class__._getCookieFromResponse(self.pageResponse)
	    if self.__class__.DEBUG:
	    	print "Session Cookies #1: " + self.sessionCookies.__str__() + "\n"
            self.httpHeaders["Cookie"] = self.sessionCookies
        except:
            print __file__.__str__() + ": Couldn't fetch page due to limited connectivity. Please check your internet connection and try again - " + sys.exc_info()[1].__str__()
        self.httpHeaders["Referer"] = self.requestUrl
        # Initialize the account related variables...
        self.currentPageContent = self.__class__._decodeGzippedContent(self.getPageContent())
	if self.__class__.DEBUG:
	    fh=open("/home/supmit/work/odesk/SalesForceCVSearch/testdumps/dice.html", "w")
	    fh.write(self.currentPageContent)
	    fh.close()
	return(self.currentPageContent)


    def _getLoginFormElementsDict(self):
	bsoup = BeautifulSoup(self.currentPageContent)
	loginForm = bsoup.find("form", {'name' : 'INFO'})
	self.requestUrl = loginForm["action"]
	#self.requestUrl += "?op=3000"
	self.requestMethod = loginForm["method"]
	if not self.__class__._isAbsoluteUrl(self.requestUrl):
	    self.requestUrl = self.baseUrl + os.path.sep + self.requestUrl
	formElementsDict = {}
	loginFormContent = loginForm.renderContents()
	formSoup = BeautifulSoup(loginFormContent)
	allHiddenElements = formSoup.findAll("input", {'type' : 'hidden'})
	for elem in allHiddenElements:
	    formElementsDict[elem['name']] = elem['value']
	formElementsDict["SUBMIT"] = 'Submit'
	formElementsDict["SJT_USER_NAME"] = ''
	formElementsDict["SJT_PASSWD"] = ''
	formElementsDict["op"] = '2000'
	return(formElementsDict)


    # This method fetches the page containing the 'Advanced Search' feature for searching jobs.
    def getSearchInterfacePage(self):
	searchInterfaceLinkPattern = re.compile(r"Search\s+for\s+Jobs", re.IGNORECASE | re.DOTALL)
	pageContent = self.currentPageContent
	escapeQuotePattern = re.compile(r"\\\"")
	pageContent = escapeQuotePattern.sub("'", self.currentPageContent)
	soup = BeautifulSoup(pageContent)
	allAnchorTags = soup.findAll("a")
	searchInterfaceURL = ""
	for atag in allAnchorTags:
	    if searchInterfaceLinkPattern.search(atag.renderContents()) and atag.has_key("href"):
		searchInterfaceURL = atag["href"]
		break
	    else:
		continue
	if searchInterfaceURL == "":
	    return None
	if not self.__class__._isAbsoluteUrl(searchInterfaceURL):
	    searchInterfaceURL = self.baseUrl + searchInterfaceURL
	if self.__class__.DEBUG:
	    print "Search Page URL: " + searchInterfaceURL
	self.requestUrl = searchInterfaceURL
	self.pageRequest = urllib2.Request(self.requestUrl, None, self.httpHeaders)
	try:
            self.pageResponse = self.no_redirect_opener.open(self.pageRequest)
	except:
            print __file__.__str__() + ": Couldn't fetch page due to limited connectivity. Please check your internet connection and try again - " + sys.exc_info()[1].__str__()
        self.httpHeaders["Referer"] = self.requestUrl
        self.currentPageContent = self.__class__._decodeGzippedContent(self.getPageContent())
	if self.__class__.DEBUG:
	    fh=open("/home/supmit/work/odesk/SalesForceCVSearch/testdumps/diceSearchInterface.html", "w")
	    fh.write(self.currentPageContent)
	    fh.close()
	return(self.currentPageContent)


    def _getListedCountries(self):
	pageContent = self.currentPageContent
	soup = BeautifulSoup(pageContent)
	countrySelect = soup.find("select", {'name' : 'COUNTRY' })
	options = countrySelect.findAll("option")
	countryDict = {}
	for opt in options:
	    countryName = opt.renderContents()
	    countryId = opt["value"]
	    countryDict[countryName.lower()] = countryId
	return(countryDict)

    # 'countryName' is Optional. If not specified, jobs from all countries will be listed by the search.
    # Note: This behaviour is in contradiction to how dice displays search results if country is not chosen 
    # by the user. (dice displays jobs for the United States if the user doesn't select any specific country).
    # The return value from this method is a dict of 'Job' objects by country name. This method takes care of multiple pages
    # returned as a result of searching for jobs in dice.com. (Country name is key and the list of 'Job' objects is value).
    # 'paramsDict' is a python dictionary containing other attributes used in dice for filtering the search
    # results. By default, it is an empty dictionary. This also is an optional param like 'countryName'.
    def getSearchResults(self, logobj, countryName=None, paramsDict={}):
	countryDict = self._getListedCountries()
	countryId = None
	searchUrlsDict = {}
    	if countryName and countryDict.has_key(countryName.lower()):
	    countryId = countryDict[countryName.lower()]
	requestUrl = "http://seeker.dice.com/jobsearch/servlet/JobSearch?caller=0&LOCATION_OPTION=3&EXTRA_STUFF=0&N=0&Hf=0&Ntk=JobSearchRanking&op=300&values=&FREE_TEXT=&Ntx=mode+matchall&EXCLUDE_KEY1=p_JobTitle&EXCLUDE_TEXT1=&EXCLUDE_KEY2=p_JobTitle&EXCLUDE_TEXT2=&EXCLUDE_KEY3=p_JobTitle&EXCLUDE_TEXT3=&EXCLUDE_KEY4=p_JobTitle&EXCLUDE_TEXT4=&EXCLUDE_KEY5=p_JobTitle&EXCLUDE_TEXT5=&EXCLUDE_KEY6=p_JobTitle&EXCLUDE_TEXT6=&EXCLUDE_KEY7=p_JobTitle&EXCLUDE_TEXT7=&EXCLUDE_TEXT8=&RADIUS=80.4672&WHERE=&locationRadio=on&"
	if countryId:
	    requestUrl += "COUNTRY=" + countryId + "&METRO_AREA=33.78715899%2C-84.39164034&AREA_CODES=&AC_COUNTRY=1525&TRAVEL=0&TAXTERM=0&SORTSPEC=0&FRMT=1&DAYSBACK=30&NUM_PER_PAGE=50"
	    searchUrlsDict[countryName.lower()] = requestUrl
	else:
	    for countryName in countryDict.keys():
		requestUrlCountry = requestUrl + "COUNTRY=" + countryDict[countryName] + "&METRO_AREA=33.78715899%2C-84.39164034&AREA_CODES=&AC_COUNTRY=1525&TRAVEL=0&TAXTERM=0&SORTSPEC=0&FRMT=1&DAYSBACK=30&NUM_PER_PAGE=50"
		searchUrlsDict[countryName] = requestUrlCountry
	for country in searchUrlsDict.keys():
	    url = searchUrlsDict[country]
	    logobj.write("Retrieving results for '%s': %s\n"%(country, url))
	    pageRequestCountry = urllib2.Request(url, None, self.httpHeaders)
	    try:
            	pageResponseCountry = self.no_redirect_opener.open(pageRequestCountry)
		pageContentCountry = self.__class__._decodeGzippedContent(pageResponseCountry.read())
		self.searchPageContentsDict[country] = pageContentCountry
		if self.__class__.DEBUG:
		    countryFile = self.__class__.generateContextualFilename(country, "html")
	    	    fh=open("/home/supmit/work/odesk/SalesForceCVSearch/testdumps/DiceCountries/" + countryFile, "w")
	    	    fh.write(pageContentCountry)
	    	    fh.close()
	    except:
            	print __file__.__str__() + ": Couldn't fetch page due to limited connectivity. Please check your internet connection and try again - " + sys.exc_info()[1].__str__()
	# Parse each page from 'self.searchPageContentsDict' (dict with country name as key and search result page content as value).
	# Each of these pages have multiple jobs listed and some of these pages may refer to more pages (numbered 2, 3, 4, etc).
	for country in self.searchPageContentsDict.keys():
	    page = self.searchPageContentsDict[country]
	    self.jobsByCountry[country] = self._parseSearchResultsByCountry(page, logobj)
	    logobj.write("Returned %s jobs for '%s'"%(self.jobsByCountry[country].__len__().__str__(), country))
	return (self.jobsByCountry)


    # This method retrieves the job details page for a specific job posting, using the url passed to it as 2nd argument.
    # It returns the HTML content of the details page.
    def _requestJobDetailsPage(self, detailPageUrl, logobj=None):
	jobDetailsPageRequest = urllib2.Request(detailPageUrl, None, self.httpHeaders)
	jobDetailsPageContent = None
	if logobj:
	    logobj.write("Trying to fetch job details from '%s'\n"%detailPageUrl)
	try:
	    jobDetailsPageResponse = self.no_redirect_opener.open(jobDetailsPageRequest)
	    jobDetailsPageContent = self.__class__._decodeGzippedContent(jobDetailsPageResponse.read())
	except:
	    print __file__.__str__() + ": Couldn't fetch page from " + detailPageUrl + ". Please check your internet connection and try again - " + sys.exc_info()[1].__str__()
	    if logobj:
		logobj.write(__file__.__str__() + ": Couldn't fetch page from " + detailPageUrl + ". Please check your internet connection and try again - " + sys.exc_info()[1].__str__())
	return(jobDetailsPageContent)


    # Parse a page listing jobs for a specific country and returns a list of Job objects.
    # This method handles all the pages (of search results) that are navigable from the page
    # passed in as argument.
    def _parseSearchResultsByCountry(self, page, logobj = None):
	soup = BeautifulSoup(page)
	searchDiv = soup.find("div", {'id' : 'searchResHD'})
	resultH2List = searchDiv.findAll("h2")
	searchResultHeaderPattern = re.compile(r"Search\s+results:\s+(\d+)\s+\-\s+(\d+)\s+of\s+(\d+)", re.IGNORECASE)
	multiPageFlag = False
	jobsCount = 0
	jobslist = []
	for resultH2 in resultH2List:
	    resultH2Text = resultH2.renderContents()
	    resultSearchObj = searchResultHeaderPattern.search(resultH2Text)
	    if not resultSearchObj:
		continue
	    resultH2Groups = resultSearchObj.groups()
	    jobsCount = int(resultH2Groups[2])
	    if jobsCount > 50:
		multiPageFlag = True
	    break
	if logobj:
	    logobj.write("Found %s jobs\n"%jobsCount.__str__())
	currentPageNumber = 1
	pageCount = 1
	if multiPageFlag: # We have more than one page of search results (jobs)
	    pageCount = int(jobsCount/50) + 1
	pageCtr = 1
	while pageCtr <=  pageCount:
	    alltrs = soup.findAll("tr", {'class' : 'STDsrRes'})
	    for tr in alltrs:
		alltds = tr.findAll("td")
		# Now create a job object by extracting info from all the 'td' elements. Append each of these jobs to the list named 'jobslist'.
		tdctr = 0
		jobAttribs = {}
		titleAnchor = alltds[0].find("a")
		jobAttribs['JobTitle'] = titleAnchor.renderContents() # Index 0 will contain the job profile name and the URL to the job details page.
		jobAttribs['JobUrl'] = titleAnchor['href']
		if not self.__class__._isAbsoluteUrl(jobAttribs['JobUrl']):
	    	    jobAttribs['JobUrl'] = self.baseUrl + jobAttribs['JobUrl']
		jobAttribs['CompanyName'] = alltds[1].find("a").renderContents() # Index 1 will contain the name of the company offering the position ('CompanyName')
		jobAttribs['Locations'] = alltds[2].renderContents() # Index 2 will contain the geographical location at which the position exists ('Locations')
		jobAttribs['PostedOn'] = alltds[3].renderContents() # Index 3 will contain the date on which the job was posted on dice.com.
		# Now get the job details page using jobAttribs['JobUrl']
		jobDetailsPageContent = self._requestJobDetailsPage(jobAttribs['JobUrl'], logobj)
		if jobDetailsPageContent and jobDetailsPageContent != "":
		    if logobj:
	    	    	logobj.write("Attempting to parse job details page from '%s'"%jobAttribs['JobUrl'])
		    jobAttribs = self._parseJobDetailsPage(jobDetailsPageContent, jobAttribs, logobj)
		else:
		    logobj.write("Could not retrieve all details about the job '%s'.\n"%(jobAttribs['JobTitle']))
		# Remove all newline characters in the 'jobAttribs' values by '<br>'. Also remove multiple whitespace characters with a single space.
		for attr in jobAttribs.keys():
		    jobAttribs[attr] = self.__class__.newlinePattern.sub("<br>", jobAttribs[attr])
		    jobAttribs[attr] = self.__class__.multipleWhiteSpacesPattern.sub(" ", jobAttribs[attr])
		    jobAttribs[attr] = self.__class__.htmlTagPattern.sub("", jobAttribs[attr])
	    	    jobAttribs[attr] = self.__class__.htmlEntityPattern.sub("; ", jobAttribs[attr])
		# Create a 'Job' object and append it to 'jobslist'.
		job = Job(jobAttribs)
		jobslist.append(job)
	    # Now, increment the 'pageCtr' variable, retrieve the next page (if any) and create a soup object from its contents.
	    pageCtr += 1
	    nextPageAnchorTag = soup.find("a", {'class' : 'nextPage'})
	    nextPageUrl = None
	    if nextPageAnchorTag and nextPageAnchorTag.has_key("href"):
		nextPageUrl = nextPageAnchorTag["href"]
		if not self.__class__._isAbsoluteUrl(nextPageUrl):
	    	    nextPageUrl = self.baseUrl + os.path.sep + nextPageUrl
	        if logobj:
		    logobj.write("Retrieving page %d: %s\n"%(pageCtr, nextPageUrl))
		nextPageRequest = urllib2.Request(nextPageUrl, None, self.httpHeaders)
		try:
		    nextPageResponse = self.no_redirect_opener.open(nextPageRequest)
		    nextPageContent = self.__class__._decodeGzippedContent(nextPageResponse.read())
		    self.httpHeaders['Referer'] = nextPageUrl
		    soup = BeautifulSoup(nextPageContent)
		except:
		    print __file__.__str__() + ": Couldn't fetch page due to limited connectivity. Please check your internet connection and try again - " + sys.exc_info()[1].__str__()
		    if logobj:
		    	logobj.write(__file__.__str__() + ": Couldn't fetch page due to limited connectivity. Please check your internet connection and try again - " + sys.exc_info()[1].__str__())
	    else:
		break
	return(jobslist)

	
    # This method parses the job details page and populated the dict 'jobAttr' passed in as parameter.
    # The method returns the dict 'jobAttr' after populating it appropriately. Normally, 'jobAttr' contains
    # the keys 'JobTitle', 'JobUrl', 'PostedOn', 'Locations' and 'CompanyName' when it is passed to this
    # method. However, this method checks if any of those values are non-existent, and tries to fill it 
    # with information gathered from the job details page.
    def _parseJobDetailsPage(self, detailsPageContent, jobAttr, logobj=None):
	soup = None # Initialize variable 
	try:
	    soup = BeautifulSoup(detailsPageContent)
	except:
	    if logobj is not None:
		logobj.write(__file__.__str__() + ": Handling error in parsing - %s\n"%(sys.exc_info()[1].__str__()))
	    detailsPageContent = self.__class__.sanitizePageHTML(detailsPageContent)
	    soup = BeautifulSoup(detailsPageContent)
	    if self.__class__.DEBUG:
	    	print __file__.__str__() + ": Handled error in parsing - %s\n"%(sys.exc_info()[1].__str__())
	if not soup:
	    print __file__.__str__() + " : Could not create 'BeautifulSoup' object by parsing content \n"
	    return (None)
	descDiv = soup.find("div",{'id' : 'detailDescription' })
	if not descDiv:
	    descDiv = soup.find("ul", {'class' : 'job_details'})
	if not descDiv:
	    descDiv = soup.find("div", {'class' : 'job_description'})
	if descDiv:
	    jobAttr['JobDescription'] = descDiv.renderContents()
	else:
	    jobAttr['JobDescription'] = ""
	contactInfoText = ""
	contactDiv = soup.find("div", {'id' : 'contactInfo'})
	if not contactDiv:
	    contactDiv = soup.find("div", {'class' : 'contact_info'})
	    if not contactDiv: # If no contact information can be found, then use the 'APPLY_FOR_JOB' link as contact info.
		contactDiv = soup.find("a", {'id' : 'APPLY_FOR_JOB'})
		applyUrl = ""
		if type(contactDiv) == dict and contactDiv.has_key("href"):
		    applyUrl = contactDiv["href"]
		if not self.__class__._isAbsoluteUrl(applyUrl):
	    	    applyUrl = self.baseUrl + applyUrl
		contactInfoText += "%s\n"%applyUrl
	    else:
		contactDivAllDt = contactDiv.findAll("dt")
		contactDivAllDd = contactDiv.findAll("dd")
		contactDivCtr = 0
		while contactDivCtr < contactDivAllDt.__len__():
		    contactInfoText += contactDivAllDt[contactDivCtr].renderContents()
		    if contactDivAllDd[contactDivCtr]:
			contactInfoText += contactDivAllDd[contactDivCtr].renderContents()
		    contactInfoText += "\n"
		    contactDivCtr += 1
	else:
	    contactInfoContents = contactDiv.renderContents()
	    soupDiv = BeautifulSoup(contactInfoContents)
	    allDivs = soupDiv.findAll("div")
	    for div in allDivs:
	    	contactInfoText += div.renderContents() + "\n"
	jobAttr['ContactDetails'] = contactInfoText
	overviewDiv = soup.find("div", {'id' : 'jobOverview'})
	if not overviewDiv:
	    overviewDiv = soup.find("div", {'class' : 'job_overview'})
	if not overviewDiv:
	    jobAttr['KeySkills'] = ""
	else:
	    dtSkills = overviewDiv.find("dt").renderContents()
	    if dtSkills == "Skills:":
	    	jobAttr['KeySkills'] = overviewDiv.find("dd").renderContents()
	jobAttr['Experience'] = ""
	jobAttr['Function'] = ""
	jobAttr['Role'] = ""
	# For experience, look for the string 'experience' in the content, and retrieve all the content immediately surrounding it.
	expPattern = re.compile(">[^<>]+experience[^<>]+<", re.IGNORECASE | re.DOTALL)
	listExperiences = expPattern.findall(detailsPageContent)
	for exp in listExperiences:
	    exp = exp.strip('<').strip('>')
	    jobAttr['Experience'] += exp + "\n"
	# TODO: Roles and functions are not specified in a formatted manner in dice. We need to get them from the free form text (job description).
	return (jobAttr)
	

    # TODO: This method looks at the page pointed by "http://seeker.dice.com/jobsearch/browse/" and extracts jobs listed
    # by the skillsets listed on that page. The return value from this method is a python dict whose keys are
    # skillset names (like 'Cisco Network Engineer', 'Perl Developer', 'Development Manager' etc.) and the values
    # are 'Job' objects listed for the skillset. The Job objects are extracted from "http://seeker.dice.com/jobsearch/results/<Skillset-Name>
    # where the <Skillset-Name> is the name of the skillset.
    def searchJobsBySkillsets(self):
	pass

    # TODO: This method looks at the page pointed by "http://seeker.dice.com/employerDirectory/servlet/EmployerDirectory?op=1000&firstChar=<Char>"
    # and extracts jobs listed for the organizations listed on that page. The return value from this method
    # is a python dict whose keys are company names (like 'Black Duck Software', 'Mehra Innovations' etc.) and the values
    # are 'Job' objects listed for the company.
    def searchJobsByCompany(self):
	pass



def main(cfgPath="./conf/jobscraper.cfg"):
    cvbrowser = DiceScraper(cfgPath)
    log = Logger(cvbrowser.logPath + os.path.sep + sys.argv[0][0:-2] + "log")
    diceUserId = "testuserdice@yahoo.com"
    dicePasswd = "somesecret"
    log.write("Trying to login into 'dice.com' as '%s'...\n"%diceUserId)
    cvbrowser.doLogin(diceUserId, dicePasswd)
    retval = cvbrowser.isLoggedIn()
    if not retval or retval == -1:
	log.write("Unable to login into 'dice.com' with userid = '%s' \n"%diceUserId)
    else:
	log.write("Successfully logged into 'dice.com' with userid = '%s' \n"%diceUserId)
    searchPageContent = cvbrowser.getSearchInterfacePage()
    if searchPageContent and searchPageContent != "":
	log.write("Successfully fetched search form\n")
    else:
	log.write("Could not get search form\n")
    countrywiseJobs = cvbrowser.getSearchResults(log, 'germany')
    for country in countrywiseJobs.keys():
	print "Processing jobs in %s: "%country
	log.write("Processing jobs in %s:\n"%country)
	countryCsvDataFile = "./testdumps/DiceCountries/" + JobScraper.JobScraper.generateContextualFilename(country, "csv")
	fcsv = open(countryCsvDataFile, "w")
	Job.dumpHeader(fcsv)
	for job in countrywiseJobs[country]:
	    job.dumpData(fcsv)
	fcsv.close()
	print "File '%s' for '%s' generated."%(countryCsvDataFile, country)
	log.write("File '%s' for '%s' generated.\n"%(countryCsvDataFile, country))
    print "Done crawling dice.com\n"
    log.write("Done crawling dice.com\n")
    log.close()
    



# Script entry point: run the scraper with the default configuration path.
if __name__ == "__main__":
    main()

