import os, sys, re, time, gzip
import simplejson as json
import urllib, urllib2, httplib
from BeautifulSoup import BeautifulSoup
from urlparse import urlparse, urlsplit
import StringIO
import mimetypes, mimetools
from ConfigParser import ConfigParser
import JobScraper
from Jobs import Job
from LogHandler import Logger
import tidy


class ShineScraper(JobScraper.JobScraper):
    # Regex pattern used in this class.
    loginActionPattern = re.compile(r"login", re.IGNORECASE | re.MULTILINE | re.DOTALL)
    nbspPattern = re.compile("&nbsp;", re.MULTILINE | re.DOTALL)
    jobIdPattern = re.compile("\#(\d+)\s\-\sShine.com", re.MULTILINE | re.DOTALL)
    # Set DEBUG to 'True' during development
    DEBUG = True
    def __init__(self, cfgFile):
	super(ShineScraper, self).__init__(cfgFile, "http://www.shine.com/", "http://www.shine.com/myshine/login/")
	self.httpHeaders['Referer'] = self.websiteUrl
	self.jobsByIndustry = {}
	self.industryWiseCounts = {}
	self.industryLinks = {}

    # Method to login into a account (as job seeker) on shine.com
    # The account credentials are passed as arguments to the method.
    # Both arguments ('username' and 'password') are mandatory. 
    # The return value is the HTML content of the page that appears
    # immediately after logging in. The same content is also stored
    # and accessible as '<object>.currentPageContent'. 
    # Note: the value of the 'username' parameter may be an email Id.
    def doLogin(self, username, password):
	formData = self._getLoginFormElementsDict()
	formData['email'] = username
	formData['password'] = password
	self.postData = urllib.urlencode(formData)
	httpHeaders = {}
	for hdrkey in self.httpHeaders.keys():
	    if hdrkey == 'Keep-Alive':
		continue
	    httpHeaders[hdrkey] = self.httpHeaders[hdrkey]
	httpHeaders['Content-Type'] = 'application/x-www-form-urlencoded'
	httpHeaders['Content-Length'] = len(self.postData)
	httpHeaders['Referer'] = self.loginPageUrl
	self.pageRequest = urllib2.Request(self.requestUrl, self.postData, httpHeaders)
	try:
            self.pageResponse = self.no_redirect_opener.open(self.pageRequest)
            self.sessionCookies = self.__class__._getCookieFromResponse(self.pageResponse)
            self.httpHeaders["Cookie"] += self.sessionCookies
	    httpHeaders["Cookie"] += self.sessionCookies
	    if self.__class__.DEBUG:
	        print "Cookies Received: " + self.sessionCookies.__str__()
        except:
            print "dice.py: Couldn't fetch page due to limited connectivity. Please check your internet connection and try again - " + sys.exc_info()[1].__str__()
	# In this response we also expect a 'Location' header
	responseHeaders = self.pageResponse.info()
	if responseHeaders.getheader('Location'):
	    self.requestUrl = responseHeaders.getheader('Location')
	else:
	    sys.exit()
	# Delete the keys 'Origin', 'Content-Length' and 'Content-Type' from httpHeaders
	del httpHeaders['Content-Length']
	del httpHeaders['Content-Type']
	for hkey in httpHeaders.keys():
	    self.httpHeaders[hkey] = httpHeaders[hkey]
	self.pageRequest = urllib2.Request(self.requestUrl, None, self.httpHeaders)
	try:
            self.pageResponse = self.no_redirect_opener.open(self.pageRequest)
            self.sessionCookies = self.__class__._getCookieFromResponse(self.pageResponse)
	    if self.__class__.DEBUG:
	    	print "Session Cookies #1: " + self.sessionCookies.__str__() + "\n"
            self.httpHeaders["Cookie"] = self.sessionCookies
        except:
            print __file__.__str__() + ": Couldn't fetch page due to limited connectivity. Please check your internet connection and try again - " + sys.exc_info()[1].__str__()
        self.httpHeaders["Referer"] = self.requestUrl
	# Initialize the account related variables...
        self.currentPageContent = self.__class__._decodeGzippedContent(self.getPageContent())
	if self.__class__.DEBUG:
	    fh=open("/home/supmit/work/odesk/SalesForceCVSearch/testdumps/shine.html", "w")
	    fh.write(self.currentPageContent)
	    fh.close()
	return(self.currentPageContent)


    # Method to retrieve the form elements and their values
    # from the login form. This is supposed to be called from
    # 'doLogin' method. Return value is a dictionary with the
    # element names as keys and their values (if available)
    # as values. This method also sets the values of self.requestUrl
    # and self.requestMethod to the values of the 'action' and 
    # 'method' attributes of the form respectively.
    def _getLoginFormElementsDict(self):
	self.currentPageContent = self.__class__.sanitizePageHTML(self.currentPageContent)
	soup = BeautifulSoup(self.currentPageContent)
	loginForm = soup.find("form", {'action' : self.__class__.loginActionPattern })
    	self.requestUrl = loginForm["action"]
	self.requestMethod = loginForm["method"]
	if not self.__class__._isAbsoluteUrl(self.requestUrl):
	    self.requestUrl = self.baseUrl + os.path.sep + self.requestUrl
	allInputs = loginForm.findAll("input")
	formElementsDict = {}
	for inputTag in allInputs:
	    if inputTag.has_key("type") and inputTag["type"] == "checkbox": # Not interested in checkboxes.
		continue
	    if inputTag.has_key("name"):
		formElementsDict[inputTag["name"]] = ""
	    if inputTag.has_key("value"):
		formElementsDict[inputTag["name"]] = inputTag["value"]
	return(formElementsDict)

    # Method to generically navigate to a page pointed to
    # by the 'pageUrl' parameter. The contents of the page
    # will be returned. No object attributes are set or 
    # changed in this method.
    # TODO: This method is exactly same as the 'navigatePage' 
    # method in monster.py. So this method should be ideally
    # a part of JobScraper.JobScraper class (in 'JobScraper.py' file).
    def navigatePage(self, pageUrl):
	domainNameRegex = re.compile(self.domainName, re.IGNORECASE)
	domainNameSearch = domainNameRegex.search(pageUrl)
	if not domainNameSearch:
	    print "External page URL: " + pageUrl + "\n"
	    return None
	self.requestUrl = pageUrl
	self.pageRequest = urllib2.Request(self.requestUrl, None, self.httpHeaders)
	try:
            self.pageResponse = self.no_redirect_opener.open(self.pageRequest)
        except:
            print __file__.__str__() + ": Couldn't fetch page due to limited connectivity. Please check your internet connection and try again" + sys.exc_info()[1].__str__()
	responseHeaders = self.pageResponse.info()
	if responseHeaders.getheader('Location'):
	    redirectUrl = responseHeaders.getheader('Location')
	    self.requestUrl = redirectUrl
	    self.httpHeaders["Referer"] = self.requestUrl
	    self.pageRequest = urllib2.Request(self.requestUrl, None, self.httpHeaders)
	    try:
            	self.pageResponse = self.no_redirect_opener.open(self.pageRequest)
            except:
            	print __file__.__str__() + ": Couldn't fetch page due to limited connectivity. Please check your internet connection and try again" + sys.exc_info()[1].__str__()
        self.httpHeaders["Referer"] = self.requestUrl
        # Initialize the account related variables...
        self.currentPageContent = self.__class__._decodeGzippedContent(self.getPageContent())
	return(self.currentPageContent)

    # This method will fetch the contents of the search page.
    # The contents will be returned as well as self.currentPageContent
    # value will be set with it. The search page is identified by the
    # anchor tag surrounding the text 'Job Search'.
    def getSearchInterfacePage(self):
	pageContent = self.currentPageContent
	soup = BeautifulSoup(pageContent)
	jobSearchPattern = re.compile(r"\/job\-search\/",re.MULTILINE | re.DOTALL)
	atag = soup.find("a", {'href' :  jobSearchPattern })
	while atag:
	    contents = atag.renderContents()
	    if contents != 'Job Search':
		atag = atag.findNext("a", { 'href' : jobSearchPattern })
		continue
	    else:
		searchPageUrl = atag["href"]
		if not self.__class__._isAbsoluteUrl(searchPageUrl):
	    	    searchPageUrl = self.baseUrl + os.path.sep + searchPageUrl
		pageContent = self.navigatePage(searchPageUrl)
		self.currentPageContent = pageContent
		return(pageContent)
		

    # This method parses the contents returned by the call to 
    # the job search page ('getSearchInterfacePage' method). The 
    # contents of the page are accessed as self.currentPageContent.
    # The return value of this method is a dictionary whose
    # keys are the various industry names and the values are
    # the URLs pointing to the first job listing page for those 
    # industries.
    def _extractIndustriesLinks(self):
	soup = BeautifulSoup(self.currentPageContent)
	divIndustries = soup.find("div", {'id' : 'IndustryJobs'})
	divIndustries = divIndustries.findNext("div", {'class' : 'jobs_search_mid'})
	industriesContent = divIndustries.renderContents()
	isoup = BeautifulSoup(industriesContent)
	allAnchors = isoup.findAll("a")
	self.industryLinks = {}
	jobCountPattern = re.compile(r"\((\d+)\)", re.MULTILINE | re.DOTALL)
	for anchor in allAnchors:
	    anchorUrl = None
	    if anchor.has_key("href"):
		anchorUrl = anchor["href"]
	    else:
		continue
	    if not self.__class__._isAbsoluteUrl(anchorUrl):
	    	anchorUrl = self.baseUrl + anchorUrl
	    indNameInfo = anchor.renderContents()
	    indName, indInfo = indNameInfo.split("Jobs")
	    indName = self.__class__.nbspPattern.sub("", indName)
	    jobCountSearch = jobCountPattern.search(indInfo)
	    jobCount = None
	    if jobCountSearch:
		jobCount = jobCountSearch.groups()[0]
	    self.industryWiseCounts[indName] = jobCount
	    self.industryLinks[indName] = anchorUrl
	return(self.industryLinks)

    # Wrapper over _extractIndustriesLinks so that we can use it
    # in 'main()'. ('_extractIndustriesLinks' could also be used
    # directly from 'main()', but since it was written to be used
    # as a private method, we created this wrapper to make use of
    # it externally.
    def getJobListingUrlsByIndustry(self):
	self._extractIndustriesLinks()
	return(self.industryLinks)


    # This method retrieves a page of job listings for a
    # specific industry. The actual task of retrieving
    # the page is done by the method 'navigatePage'. It may 
    # be argued that this method is not necessary, and actually
    # the existence of this method is solely for the purpose
    # of making the code organized and clear to enhance 
    # maintainability.
    def getJobsListingStartPage(self, pageUrl):
	pageContent = self.navigatePage(pageUrl)
	return(pageContent)

    # This method extracts all jobs info for a specified
    # industry (passed as an argument). It handles extraction
    # of jobs info from multiple pages of listing, gets the
    # jobs details by extracting info from the job details 
    # page and creates a 'Job' object for each job processed.
    # It returns the info as a dictionary, whose keys are the
    # URLs to the job details pages, and the values are the
    # 'Job' objects created using the info for each job. It 
    # gets the URL to the first page of the job listing for 
    # the specified industry by using the 'industryLinks'
    # object attribute populated by the call to 'getJobListingUrlsByIndustry'.
    def getJobsDict(self, industry, logObj=None):
	startPageUrl = self.industryLinks[industry]
	allJobUrlsList = []
	jobsDict = {}
	print "'%s' industry has %s jobs at %s \n"%(industry, self.industryWiseCounts[industry], startPageUrl)
	if logObj is not None:
	    logObj.write("'%s' industry has %s jobs at %s\n"%(industry, self.industryWiseCounts[industry], startPageUrl))
	pageContent = self.getJobsListingStartPage(startPageUrl)
	pageContent = pageContent.decode('ascii', 'ignore')
	pageContent = self.__class__.sanitizePageHTML(pageContent)
	nextPageNumber = 2
	nextPageUrl = startPageUrl + "&page=1"
	errorPagePattern = re.compile(r"Error\s+Page", re.IGNORECASE | re.MULTILINE | re.DOTALL)
	while pageContent is not None:
	    jobUrlsList = self._getJobLinksFromPage(pageContent)
	    allJobUrlsList.extend(jobUrlsList)
	    logObj.write("Page # %s contains %s links\n"%((nextPageNumber - 1).__str__(), jobUrlsList.__len__().__str__()))
	    nextPageUrl = self.getNextPageUrl(nextPageUrl, nextPageNumber)
	    if nextPageUrl and self.__class__.DEBUG:
		print "Processing page: %s\n"%nextPageNumber.__str__()
		logObj.write("Next Page URL: %s\n"%nextPageUrl)
	    logObj.write("Processing page: %s\n"%nextPageNumber.__str__())
	    pageContent = None
	    if nextPageUrl is not None:
	    	pageContent = self.navigatePage(nextPageUrl)
		pageContent = pageContent.decode('ascii', 'ignore')
		soup = None # Initialize soup...
		try:
		    soup = BeautifulSoup(pageContent)
		except:
		    logObj.write("Error processing content at page # %s: %s\n"%(nextPageNumber.__str__(), sys.exc_info()[1].__str__()))
		    tidy_opts = dict(output_xhtml=1, add_xml_decl=0, indent=1, tidy_mark=0)
		    tidy_html = str(tidy.parseString(pageContent, **tidy_opts))
		    pageContent = tidy_html
		    soup = BeautifulSoup(pageContent)
		pageTitleTag = soup.find("title")
		if pageTitleTag is not None:
		    pageTitle = pageTitleTag.renderContents()
		    if errorPagePattern.search(pageTitle):
		    	pageContent = None
		    	break
		nextPageNumber += 1
	if logObj:
	    logObj.write("Job page URLs for industry '%s':\n%s (%s)\n\n"%(industry, allJobUrlsList.__str__(), allJobUrlsList.__len__().__str__()))
	self.httpHeaders["Referer"] = startPageUrl
	logObj.write("Retrieving details of %s jobs found for '%s' industry\n"%(allJobUrlsList.__len__().__str__(),industry))
	for jobUrl in allJobUrlsList:
	    jobDetailsPage = self.navigatePage(jobUrl)
	    jobDetailsPage = jobDetailsPage.decode('ascii', 'ignore')
	    jobAttrs = self.parseJobDetailsPage(jobDetailsPage)
	    jobAttrs['JobUrl'] = jobUrl
	    jobsDict[jobUrl] = Job(jobAttrs)
	return(jobsDict)


    # This method retrieves the URL of the next page
    # containing listing of jobs by parsing the content
    # of the current page. If the current page is not
    # a job listing page, then this method returns 'None'.
    # Otherwise, it returns the URL of the next page. 
    def getNextPageUrl(self, startPageUrl, pageNum):
	pageNumPattern = re.compile(r"^(http:\/\/www\.shine\.com\/myshine\/job\-search\/simple\/.*)&page=\d+$")
	pageNumSearch = pageNumPattern.search(startPageUrl)
	if pageNumSearch:
	    nextPageUrl = pageNumSearch.groups()[0]
	    nextPageUrl += "&page=" + pageNum.__str__()
	else:
	    nextPageUrl = None
	return(nextPageUrl)

    def getNextPageUrl_old(self, pageContent):
	soup = BeautifulSoup(pageContent)
	paginationPattern = re.compile(r"javascript\:pagination\(\'([^\']+)\'\)", re.MULTILINE | re.DOTALL)
	button3Pattern = re.compile(r"button3\.gif", re.MULTILINE | re.DOTALL)
	paginationAnchorsList = soup.findAll("a", {'onclick' : paginationPattern})
	nextPageUrl = None
	for pageAnchor in paginationAnchorsList:
	    anchorContent = pageAnchor.renderContents()
	    if not button3Pattern.search(anchorContent):
		continue
	    onclickCall = pageAnchor["onclick"]
	    paginationSearch = paginationPattern.search(onclickCall)
	    if paginationSearch:
		nextPageUrl = paginationSearch.groups()[0]
	    	nextPageUrl = nextPageUrl.decode("ascii", "ignore")
	    	if not self.__class__._isAbsoluteUrl(nextPageUrl):
	    	    nextPageUrl = self.baseUrl + nextPageUrl
	    break
	return(nextPageUrl)

    # This method parses a job listing page (whose
    # content is passed as argument) and returns a 
    # list containing the URLs to job details pages
    # of each of the jobs listed in that page.
    def _getJobLinksFromPage(self, pageContent):
	soup = BeautifulSoup(pageContent)
	jobAnchorTagsList = soup.findAll("a", {'class' : 'cls_searchresult_a'})
	jobUrlsList = []
	for anchorTag in jobAnchorTagsList:
	    jobUrl = anchorTag["href"]
	    jobUrl = jobUrl.decode("ascii", 'ignore')
	    if not self.__class__._isAbsoluteUrl(jobUrl):
	    	jobUrl = self.baseUrl + jobUrl
	    jobUrlsList.append(jobUrl)
	return(jobUrlsList)

    # This method parses a job details page and returns
    # a dictionary. The keys are the various attributes
    # supported by the 'Job' class. 
    def parseJobDetailsPage(self, pageContent):
	soup = None
	try:
	    soup = BeautifulSoup(pageContent)
	except:
	    tidy_opts = dict(output_xhtml=1, add_xml_decl=0, indent=1, tidy_mark=0)
	    tidy_html = str(tidy.parseString(pageContent, **tidy_opts))
	    pageContent = tidy_html
	    soup = BeautifulSoup(pageContent)
	job_attribs = {}
	jobFunctionSpan = soup.find("span", {'itemprop' : 'occupationalCategory'})
	if jobFunctionSpan is not None:
	    job_attribs['Function'] = jobFunctionSpan.renderContents()
	else:
	    job_attribs['Function'] = ""
	jobLocationSpan = soup.find("span", {'itemprop' : 'address'})
	if jobLocationSpan is not None:
	    job_attribs['Locations'] = jobLocationSpan.renderContents()
	else:
	    job_attribs['Locations'] = ""
	jobExperienceSpan = soup.find("span", {'itemprop' : 'experienceRequirements'})
	if jobExperienceSpan is not None:
	    job_attribs['Experience'] = jobExperienceSpan.renderContents()
	else:
	    job_attribs['Experience'] = ""
	jobTitleSpan = soup.find("span", {'itemprop' : 'title'})
	if jobTitleSpan is not None:
	    job_attribs['JobTitle'] = jobTitleSpan.renderContents()
	else:
	    job_attribs['JobTitle'] = ""
	jobCompanyNameSpan = soup.find("span", {'itemprop' : 'name'})
	if jobCompanyNameSpan is not None:
	    job_attribs['CompanyName'] = jobCompanyNameSpan.renderContents()
	else:
	    job_attribs['CompanyName'] = ""
	jobPostedDateSpan = soup.find("span", {'itemprop' : 'datePosted'})
	if jobPostedDateSpan is not None:
	    job_attribs['PostedOn'] = jobPostedDateSpan.renderContents()
	else:
	    job_attribs['PostedOn'] = ""
	jobDescriptionSpan = soup.find("span", {'itemprop' : 'description'})
	if jobDescriptionSpan is not None:
	    job_attribs['JobDescription'] = jobDescriptionSpan.renderContents()
	else:
	    job_attribs['JobDescription'] = ""
	# Since shine.com doesn't display contact details explicitly, we will
	# store the 'Apply' URL in the 'ContactDetails' field.
	pageTitleTag = soup.find("title")
	if pageTitleTag is not None:
	    pageTitle = pageTitleTag.renderContents()
	else:
	    pageTitle = ""
	jobIdSearch = self.__class__.jobIdPattern.search(pageTitle)
	jobId = None
	if jobIdSearch is not None:
	    jobId = jobIdSearch.groups()[0]
	    applyUrl = "http://www.shine.com/myshine/jobs/apply/" + jobId + "/?confirm=true"
	    job_attribs['ContactDetails'] = applyUrl
	else:
	    job_attribs['ContactDetails'] = ""
	return(job_attribs)

# Starting point of crawl operation:
def main(cfgPath="./conf/jobscraper.cfg"):
    shineObj = ShineScraper(cfgPath)
    log = None
    try:
    	log = Logger(shineObj.logPath + os.path.sep + sys.argv[0][0:-2] + "log")
    except:
	print "Error creating log object: " + sys.exc_info()[1].__str__() + "\n"
    shineUserId = "supmit2k3@yahoo.com"
    shinePassword = "spmprx"
    shineObj.doLogin(shineUserId, shinePassword)
    if shineObj.assertLogin("Logout") and log is not None:
    	log.write("Successfully logged into shine.com as '%s'\n"%(shineUserId))
    shineObj.getSearchInterfacePage()
    industriesLinks = shineObj.getJobListingUrlsByIndustry()
    logstring_industries = "\n"
    for indName in industriesLinks.keys():
	logstring_industries += indName + " ====>> " + industriesLinks[indName] + "\n"
    if shineObj.__class__.DEBUG:
	print "\nLinks to the first pages of industrywise jobs listing:\n" + logstring_industries + "\n"
    if log is not None:
    	log.write("\nLinks to the first pages of industrywise jobs listing:\n" + logstring_industries + "\n")
    # Now iterate through the industrywise job listing pages...
    industryNames = industriesLinks.keys()
    industryNames.sort()
    for indName in industryNames:
	print "Preparing to handle '%s' industry...\n"%indName
	if log is not None:
	    log.write("Preparing to handle '%s' industry...\n"%indName)
	shineObj.jobsByIndustry[indName] = shineObj.getJobsDict(indName, log)
	indFilename = "./testdumps/ShineIndustries/" + JobScraper.JobScraper.generateContextualFilename(indName, "csv")
	fh = open(indFilename, "w")
    	Job.dumpHeader(fh, "csv")
    	# Iterate over job objects in shineObj.jobsByIndustry[indName] to dump the collected info as CSV.
	for joburl in shineObj.jobsByIndustry[indName].keys():
	    jobObj = shineObj.jobsByIndustry[indName][joburl]
	    jobObj.dumpData(fh, "csv")
	fh.close()
	if log is not None:
	    log.write("Done with '%s' industry\n=========================================\n"%indName)
	print "Completed '%s' industry jobs\n=========================================\n"%indName




# Run the crawl with the default config path when invoked as a script.
if __name__ == "__main__":
    main()

