import os, sys, re, time, gzip
import urllib, urllib2, httplib
from BeautifulSoup import BeautifulSoup
import HTMLParser
from urlparse import urlparse, urlsplit
import StringIO
import mimetypes, mimetools
from ConfigParser import ConfigParser
#from lxml.html.clean import clean_html
from Jobs import Job
import JobScraper
from LogHandler import Logger
import tidy


class MonsterScraper(JobScraper.JobScraper):
    # Regex patterns used in this class
    jobTitlePattern = re.compile(r"\'Job\s+Title:\s*([^\']+)\'\,")
    companyNamePattern = re.compile(r"Company\s+Name<\/span>\s*<br\s*\/>\s*([^<]+)<br\s*\/>", re.IGNORECASE | re.MULTILINE | re.DOTALL)
    locationPattern = re.compile(r"Locations<\/span><br\s*\/>\s*([^<]+)<br\s*\/>", re.IGNORECASE | re.MULTILINE | re.DOTALL)
    keySkillsPattern = re.compile(r">Key\s+Skills<\/span><br\s*\/><div\s*[^>]+>([^<]+)<\/div>", re.IGNORECASE | re.MULTILINE | re.DOTALL)
    experiencePattern = re.compile(r">Experience<\/span>\s*<br\s*\/>\s*([^<]+)<br\s*\/>", re.IGNORECASE | re.MULTILINE | re.DOTALL)
    functionPattern = re.compile(r">Function<\/span><br\s*\/>\s*([^<]+)<br\s*\/>", re.IGNORECASE | re.MULTILINE | re.DOTALL)
    rolePattern = re.compile(r'>Role<\/span><br\s*\/>(.*)<span\s+class=\"bold\s+font_13\">', re.IGNORECASE | re.MULTILINE | re.DOTALL)
    postedOnPattern = re.compile(r"Posted\s+On<\/span><br\s*\/>\s*([^<]+)<br\s*\/>", re.IGNORECASE | re.MULTILINE | re.DOTALL)
    numericHtmlEntityPattern = re.compile(r"&#\d{3,4};", re.MULTILINE | re.DOTALL)

    def __init__(self, cfgFile):
	super(MonsterScraper, self).__init__(cfgFile, "http://www.monster.com/", "http://my.monsterindia.com/login.html")

    def _getLoginFormElementsDict(self):
	escapeQuotePattern = re.compile(r"\\\"")
	pageContent = escapeQuotePattern.sub("'", self.currentPageContent)
	unmatchedQuotePattern = re.compile(r"[^=]\s*\"\"")
	self.currentPageContent = unmatchedQuotePattern.sub("\"", pageContent)
	bsoup = BeautifulSoup(self.currentPageContent)
	loginForm = bsoup.find("form", {'name' : 'myform'})
	self.requestUrl = loginForm["action"]
	self.requestMethod = loginForm["method"]
	if not self.__class__._isAbsoluteUrl(self.requestUrl):
	    self.requestUrl = self.baseUrl + os.path.sep + self.requestUrl
	#print self.requestUrl + "\n"
	formElementsDict = {}
	allHiddenElements = bsoup.findAll("input", {'type' : 'hidden'})
	for elem in allHiddenElements:
	    formElementsDict[elem['name']] = elem['value']
	formElementsDict["submit"] = 'Login'
	formElementsDict["checkbox"] = 0
	formElementsDict["userName"] = ''
	formElementsDict["passwd"] = ''
	return(formElementsDict)

    def doLogin(self, username, password):
	formData = self._getLoginFormElementsDict()
	if not self.siteUsername or not self.sitePassword:
	    self.siteUsername = username
	    self.sitePassword = password
	formData["userName"] = self.siteUsername
	formData["passwd"] = self.sitePassword
	try:
	    atIndex = formData["userName"].index("@")
	    if atIndex > 0:
		formData["checkbox"] = '1'
	except ValueError:
	    pass
	self.postData = urllib.urlencode(formData)
	self.pageRequest = urllib2.Request(self.requestUrl, self.postData, self.httpHeaders)
	try:
            self.pageResponse = self.no_redirect_opener.open(self.pageRequest)
            self.sessionCookies = self.__class__._getCookieFromResponse(self.pageResponse)
            self.httpHeaders["Cookie"] = self.sessionCookies
        except:
            print __file__.__str__() + ": Couldn't fetch page due to limited connectivity. Please check your internet connection and try again" + sys.exc_info()[1].__str__()

	# In this response we also expect a 'Location' header
	responseHeaders = self.pageResponse.info()
	self.requestUrl = responseHeaders.getheader('Location')
	#print "Location: " + self.requestUrl + "\n"
	self.pageRequest = urllib2.Request(self.requestUrl, None, self.httpHeaders)
	try:
            self.pageResponse = self.no_redirect_opener.open(self.pageRequest)
            self.sessionCookies = self.__class__._getCookieFromResponse(self.pageResponse)
            self.httpHeaders["Cookie"] = self.sessionCookies
        except:
            print __file__.__str__() + ": Couldn't fetch page due to limited connectivity. Please check your internet connection and try again" + sys.exc_info()[1].__str__()
        self.httpHeaders["Referer"] = self.requestUrl
        # Initialize the account related variables...
        self.currentPageContent = self.__class__._decodeGzippedContent(self.getPageContent())
	return(self.currentPageContent)

    # TODO: This method is exactly same as the 'navigatePage' 
    # method in shine.py. So this method should be ideally
    # a part of JobScraper.JobScraper class (in 'JobScraper.py' file).
    def navigatePage(self, pageUrl):
	domainNameRegex = re.compile(self.domainName, re.IGNORECASE)
	domainNameSearch = domainNameRegex.search(pageUrl)
	if not domainNameSearch:
	    print "External page URL: " + pageUrl + "\n"
	    return None
	self.requestUrl = pageUrl
	self.pageRequest = urllib2.Request(self.requestUrl, None, self.httpHeaders)
	try:
            self.pageResponse = self.no_redirect_opener.open(self.pageRequest)
        except:
            print __file__.__str__() + ": Couldn't fetch page due to limited connectivity. Please check your internet connection and try again" + sys.exc_info()[1].__str__()
	responseHeaders = self.pageResponse.info()
	if responseHeaders.getheader('Location'):
	    redirectUrl = responseHeaders.getheader('Location')
	    self.requestUrl = redirectUrl
	    self.httpHeaders["Referer"] = self.requestUrl
	    self.pageRequest = urllib2.Request(self.requestUrl, None, self.httpHeaders)
	    try:
            	self.pageResponse = self.no_redirect_opener.open(self.pageRequest)
            except:
            	print __file__.__str__() + ": Couldn't fetch page due to limited connectivity. Please check your internet connection and try again" + sys.exc_info()[1].__str__()
        self.httpHeaders["Referer"] = self.requestUrl
        # Initialize the account related variables...
        self.currentPageContent = self.__class__._decodeGzippedContent(self.getPageContent())
	return(self.currentPageContent)

    def _extractIndustriesLinks(self):
	tidy_opts = dict(output_xhtml=0, add_xml_decl=0, indent=1, tidy_mark=0)
	tidy_html = tidy.parseString(self.currentPageContent, **tidy_opts).__str__()
	self.currentPageContent = tidy_html
	bsoup = BeautifulSoup(tidy_html)
	allTds = bsoup.findAll("td", {'class' : 'bg_grey1'})
	industryLinks = {}
	indAnchorPattern = re.compile(r"\/searchresult\.html\?")
	for td in allTds:
	    tdcontents = td.renderContents()
	    soup2 = BeautifulSoup(tdcontents)
	    allAnchorTags = soup2.findAll("a")
	    for atag in allAnchorTags:
		indName = atag.renderContents()
		indUrl = atag["href"]
		if indAnchorPattern.search(indUrl):
		    indName = re.sub(self.__class__.multipleWhiteSpacesPattern, "", indName)
		    industryLinks[indName] = indUrl
		else:
		    continue
	return(industryLinks)


    def getJobsCount(self, pageContent=None):
	count = -1
	if pageContent is None:
	    pageContent = self.currentPageContent
	if pageContent == "":
	    print "'pageContent' is empty.\n"
	    return 0
	tidy_opts = dict(output_xhtml=0, add_xml_decl=0, indent=1, tidy_mark=0)
	tidy_html = tidy.parseString(pageContent, **tidy_opts).__str__()
	pageContent = tidy_html
	bsoup = BeautifulSoup(pageContent)
	ajax_float_div = bsoup.find("div", { 'id' : 'dd_ajax_float' })
	bsoup = BeautifulSoup(ajax_float_div.renderContents())
	jobcounttd = bsoup.find("td", { 'class' : 'txt_white' })
	jobcounttdContent = ""
	if jobcounttd is not None:
	    jobcounttdContent = jobcounttd.renderContents()
	else: # Maybe the industry is further sub categorised into industry specific roles... Return None so that we can identify these cases in the caller.
	    return None
	"""	
	else: # Maybe the industry is further sub categorised into industry specific roles... Check for 'table' tag with 'class=category1'
	    subcatTable = bsoup.find("table", {'class' : 'category1' })
	    subcatTableContent = subcatTable.renderContents()
	    # Now get all subcategory names and their URLs...
	    subcatUrlDict = {}
	"""
	jobCountPattern = re.compile(r"\s+\-\s+\d+\s+out\s+of\s+(\d+)\s*&nbsp;", re.IGNORECASE | re.MULTILINE | re.DOTALL)
	jobCountSearch = jobCountPattern.search(jobcounttdContent)
	if jobCountSearch:
	    jobCountGroups = jobCountSearch.groups()
	    count = jobCountGroups[0]
	return(count)

    # This method parses the job details page and returns the data as a Jobs.Job object 
    def getJobDetails(self, jobDetailsPageContent, jobPageUrl=""):
	job_attribs = {}
	tidy_opts = dict(output_xhtml=0, add_xml_decl=0, indent=1, tidy_mark=0)
	tidy_html = tidy.parseString(jobDetailsPageContent, **tidy_opts).__str__()
	jobDetailsPageContent = tidy_html
	soup = BeautifulSoup(jobDetailsPageContent)
	titleSpan = soup.find("span", {'class' : 'font_16 bold'})
	job_attribs['JobTitle'] = titleSpan.renderContents()
	job_attribs['JobTitle'] = re.sub(self.__class__.multipleWhiteSpacesPattern, " ", job_attribs['JobTitle'])
	detailsTd = soup.find("td", { 'class' : 'bg_orange' })
	detailsTdContent = detailsTd.renderContents()
	detSoup = BeautifulSoup(detailsTdContent)
	allSpans = detSoup.findAll("span", {'class' : 'bold font_13'})
	for span in allSpans:
	    spanContent = span.renderContents()
	    spanContent = re.sub(self.__class__.multipleWhiteSpacesPattern, " ", spanContent)
	    if spanContent == "Company Name":
		companyNameSearch = self.__class__.companyNamePattern.search(detailsTdContent)
	    	if companyNameSearch:
		    job_attribs['CompanyName'] = companyNameSearch.groups()[0]
		    job_attribs['CompanyName'] = re.sub(self.__class__.multipleWhiteSpacesPattern, " ", job_attribs['CompanyName'])
		    job_attribs['CompanyName'] = re.sub(self.__class__.numericHtmlEntityPattern, "", job_attribs['CompanyName'])
		    job_attribs['CompanyName'] = re.sub(self.__class__.htmlTagPattern, "", job_attribs['CompanyName'])
		else:
		    job_attribs['CompanyName'] = ""
	    elif spanContent == "Locations":
		locationSearch = self.__class__.locationPattern.search(detailsTdContent)
	        if locationSearch:
		    job_attribs['Locations'] = locationSearch.groups()[0]
		    job_attribs['Locations'] = re.sub(self.__class__.multipleWhiteSpacesPattern, " ", job_attribs['Locations'])
		    job_attribs['Locations'] = re.sub(self.__class__.numericHtmlEntityPattern, "", job_attribs['Locations'])
		    job_attribs['Locations'] = re.sub(self.__class__.htmlTagPattern, "", job_attribs['Locations'])
		else:
		    job_attribs['Locations'] = ""
	    elif spanContent == "Experience":
		experienceSearch = self.__class__.experiencePattern.search(detailsTdContent)
	    	if experienceSearch:
		    job_attribs['Experience'] = experienceSearch.groups()[0]
		    job_attribs['Experience'] = re.sub(self.__class__.multipleWhiteSpacesPattern, " ", job_attribs['Experience'])
		    job_attribs['Experience'] = re.sub(self.__class__.numericHtmlEntityPattern, "", job_attribs['Experience'])
		    job_attribs['Experience'] = re.sub(self.__class__.htmlTagPattern, "", job_attribs['Experience'])
		else:
		    job_attribs['Experience'] = ""
	    elif spanContent == "Key Skills":
		keySkillsSearch = self.__class__.keySkillsPattern.search(detailsTdContent)
	    	if keySkillsSearch:
		    job_attribs['KeySkills'] = keySkillsSearch.groups()[0]
		    job_attribs['KeySkills'] = re.sub(self.__class__.multipleWhiteSpacesPattern, " ", job_attribs['KeySkills'])
		    job_attribs['KeySkills'] = re.sub(self.__class__.numericHtmlEntityPattern, "", job_attribs['KeySkills'])
		    job_attribs['KeySkills'] = re.sub(self.__class__.htmlTagPattern, "", job_attribs['KeySkills'])
		else:
		    job_attribs['KeySkills'] = ""
	    elif spanContent == "Function":
		functionSearch = self.__class__.functionPattern.search(detailsTdContent)
		if functionSearch:
	    	    job_attribs['Function'] = functionSearch.groups()[0]
		    job_attribs['Function'] = re.sub(self.__class__.multipleWhiteSpacesPattern, " ", job_attribs['Function'])
		    job_attribs['Function'] = re.sub(self.__class__.numericHtmlEntityPattern, "", job_attribs['Function'])
		    job_attribs['Function'] = re.sub(self.__class__.htmlTagPattern, "", job_attribs['Function'])
		else:
		    job_attribs['Function'] = ""
	    elif spanContent == "Role":
		roleSearch = self.__class__.rolePattern.search(detailsTdContent)
		if roleSearch:
	    	    job_attribs['Role'] = roleSearch.groups()[0]
		    job_attribs['Role'] = re.sub(self.__class__.multipleWhiteSpacesPattern, " ", job_attribs['Role'])
		    job_attribs['Role'] = re.sub(self.__class__.numericHtmlEntityPattern, "", job_attribs['Role'])
		    job_attribs['Role'] = re.sub(self.__class__.htmlTagPattern, "", job_attribs['Role'])
		else:
		    job_attribs['Role'] = ""
	    elif spanContent == "Posted On":
		postedOnSearch = self.__class__.postedOnPattern.search(detailsTdContent)
		if postedOnSearch:
	    	    job_attribs['PostedOn'] = postedOnSearch.groups()[0]
		    job_attribs['PostedOn'] = re.sub(self.__class__.multipleWhiteSpacesPattern, " ", job_attribs['PostedOn'])
		    job_attribs['PostedOn'] = re.sub(self.__class__.numericHtmlEntityPattern, "", job_attribs['PostedOn'])
		    job_attribs['PostedOn'] = re.sub(self.__class__.htmlTagPattern, "", job_attribs['PostedOn'])
		else:
		    job_attribs['PostedOn'] = ""
	# Look for Job URL
	onClickPattern = re.compile(r"STTAFFUNC\.cw\(this,\s*\{\s*id\s*:")
	imgTag = soup.find("img", { 'onclick' : onClickPattern })
	onClickText = imgTag["onclick"]
	linkPattern = re.compile(r",\s*link:\s*[\'\"]([^\'\"]+)[\"\']")
	linkSearch = linkPattern.search(onClickText)
	if linkSearch:
	    job_attribs['JobUrl'] = linkSearch.groups()[0]
	else:
	    job_attribs['JobUrl'] = ""
	# TODO: 'JobDescription', 'ContactDetails' need to be extracted.
	# Create 'Job' object and return it.
	job = Job(job_attribs)
	return (job)

    # This method extracts the list of job URLs from the page and returns these as a dict.
    # The URLs are the keys and the corresponding values would be a list comprised of the job
    # title, job date, company name and a short job description. 
    def getJobUrlsDict(self, pageContent):
	tidy_opts = dict(output_xhtml=0, add_xml_decl=0, indent=1, tidy_mark=0)
	tidy_html = tidy.parseString(pageContent, **tidy_opts).__str__()
	pageContent = tidy_html
	soup = BeautifulSoup(pageContent)
	jobUrlsDict = {}
	jobCheckbox = soup.find("input", { 'type' : 'checkbox', 'name' :  'job'})
	jobDatePattern = re.compile(r"<\/a>\,\s+([^<]+)<br", re.DOTALL)
	companyNamePattern = re.compile(r"\d{4}<br>([^<]+)<br>", re.DOTALL)
	while jobCheckbox :
	    jobId = None
	    if jobCheckbox.has_key("value"):
		jobId = jobCheckbox["value"]
	    if not jobId:
		break
	    jobTd = jobCheckbox.findNext("td")
	    jobTdContents = jobTd.renderContents()
	    jobLink = jobCheckbox.findNext("a", { 'id' : 'link' + jobId})
	    jobUrl = jobLink["href"]
	    jobTitle = jobLink.renderContents()
	    jobTitle = re.sub(self.__class__.multipleWhiteSpacesPattern, "", jobTitle)
	    jobDateSearch = jobDatePattern.search(jobTdContents)
	    jobDate = ""
	    if jobDateSearch:
		jobDate = jobDateSearch.groups()[0]
		jobDate = re.sub(self.__class__.multipleWhiteSpacesPattern, "", jobDate)
	    companyName = ""
	    companyNameSearch = companyNamePattern.search(jobTdContents)
	    if companyNameSearch:
	    	companyName = companyNameSearch.groups()[0]
		companyName = re.sub(self.__class__.multipleWhiteSpacesPattern, "", companyName)
	    jobShortDesc = jobTdContents
	    jobShortDesc = re.sub(self.__class__.multipleWhiteSpacesPattern, "", jobShortDesc)
	    jobUrlsDict[jobUrl] = [jobTitle , jobDate, companyName, jobShortDesc]
	    jobCheckbox = jobCheckbox.findNext("input", { 'type' : 'checkbox', 'name' :  'job'})
	return(jobUrlsDict)

    def getNextPageUrl(self, pageContent):
	tidy_opts = dict(output_xhtml=0, add_xml_decl=0, indent=1, tidy_mark=0)
	tidy_html = tidy.parseString(pageContent, **tidy_opts).__str__()
	pageContent = tidy_html
	soup = BeautifulSoup(pageContent)
	nextPageUrl = ""
	nextSearchResultsPagePattern = re.compile(r"searchresult\.html\?", re.MULTILINE | re.DOTALL)
	nextDiv = soup.find("div", {'id' : 'dd_ajax_float'})
	if not nextDiv: # Couldn't find the div enclosing the next page URL
	    return nextPageUrl
	divContents = nextDiv.renderContents()
	divSoup = BeautifulSoup(divContents)
	allAnchors = divSoup.findAll("a")
	nextPattern = re.compile(r"Next", re.MULTILINE | re.DOTALL)
	for atag in allAnchors:
	    aContents = atag.renderContents()
	    if nextPattern.search(aContents):
		nextPageUrl = atag["href"]
	if not nextSearchResultsPagePattern.search(nextPageUrl):
	    return ""
	if not self.__class__._isAbsoluteUrl(nextPageUrl):
	    urlParts = self.requestUrl.split("?")
	    urlPathParts = urlParts[0].split("/")
	    urlPathParts.pop()
	    partPath = "/".join(urlPathParts)
	    nextPageUrl = partPath + "/" + nextPageUrl
	return(nextPageUrl)

    # Fetch the contents of the job details page.
    def getJobDetailsPage(self, detailsUrl):
	detailsPageRequest = urllib2.Request(detailsUrl, None, self.httpHeaders)
	detailsPageContent = ""
	try:
            detailsPageResponse = self.no_redirect_opener.open(detailsPageRequest)
	    detailsPageContent = self.__class__._decodeGzippedContent(detailsPageResponse.read())
        except:
            print __file__.__str__() + ": Couldn't fetch page due to limited connectivity. Please check your internet connection and try again" + sys.exc_info()[1].__str__()
	return(detailsPageContent)

    # Fetches the content from the next job listing page.
    def fetchNextPage(self, nextPageUrl):
	nextPageRequest = urllib2.Request(nextPageUrl, None, self.httpHeaders)
	try:
            nextPageResponse = self.no_redirect_opener.open(nextPageRequest)
        except:
            print __file__.__str__() + ": Couldn't fetch page (" + nextPageUrl + ") due to limited connectivity. Please check your internet connection and try again" + sys.exc_info()[1].__str__()
	    return None
	nextPageContent = self.__class__._decodeGzippedContent(nextPageResponse.read())
	return (nextPageContent)



# Driver function for monsterindia.com
def main(cfgPath="./conf/jobscraper.cfg"):
    cvbrowser = MonsterScraper(cfgPath)
    log = Logger(cvbrowser.logPath + os.path.sep + sys.argv[0][0:-2] + "log")
    cvbrowser.doLogin(r"testusermonster@yahoo.com", "somesecret")
    industryListingPageUrl = "http://jobsearch.monsterindia.com/industry/index.html"
    cvbrowser.navigatePage(industryListingPageUrl)
    industriesDict = cvbrowser._extractIndustriesLinks()
    log.write("\n\nIndustries: " + industriesDict.__str__())
    # Go to each industry specific jobs listing page
    industryJobsStartPages = {} # Dictionary to store the HTML content of start page of each industry specific jobs listing
    indCtr = 1
    for indName in industriesDict.keys():
	print "fetching jobs list for '" + indName + "'..."
	log.write("fetching jobs list for '" + indName + "'...")
	indUrl = industriesDict[indName]
	cvbrowser.navigatePage(indUrl)
	industryJobsStartPages[indName] = cvbrowser.currentPageContent
	jcount = cvbrowser.getJobsCount(industryJobsStartPages[indName])
	print "%s jobs found!\n"%jcount
	log.write("%s jobs found!\n"%jcount)
	jobUrls = cvbrowser.getJobUrlsDict(industryJobsStartPages[indName])
	indFilename = "./testdumps/MonsterIndustries/" + JobScraper.JobScraper.generateContextualFilename(indName, "csv")
	nextPageUrl = cvbrowser.getNextPageUrl(industryJobsStartPages[indName])
	log.write("Next page URL: " + nextPageUrl + "\n")
	while nextPageUrl:
	    nextPageContent = cvbrowser.fetchNextPage(nextPageUrl)
	    moreJobUrls = cvbrowser.getJobUrlsDict(nextPageContent)
	    jobUrls.update(moreJobUrls)
	    nextPageUrl = cvbrowser.getNextPageUrl(nextPageContent)
	    if nextPageUrl:
		log.write("Next page URL: " + nextPageUrl + "\n")
	    else:
		log.write("No more pages left... " + "\n")
		break
	log.write("JobUrls Dict:\n" + jobUrls.__str__() + "\n")
	fh = open(indFilename, "w")
	Job.dumpHeader(fh, "csv")
	for url in jobUrls.keys():
	    try:
	    	jobDetailsPage = cvbrowser.getJobDetailsPage(url)
	    except:
		errReason = sys.exc_info()[1].__str__()
		logMsg = "Could not fetch content from '" + url + "': " + errReason + "\n"
		log.write(logMsg)
		continue
	    try:
	    	jobObj = cvbrowser.getJobDetails(jobDetailsPage)
	    except:
		errReason = sys.exc_info()[1].__str__()
		logMsg = "Could not parse HTML content from '" + url + "': " + errReason + "\n"
		log.write(logMsg)
		continue
	    jobObj.dumpData(fh, "csv")
	fh.close()	    
	indCtr += 1
	
    print "Dumped all industry specific jobs pages.\n"
    log.write("Dumped all industry specific jobs pages.\n")
    industryJobsCountDict = {}

    log.close()




# Run the driver only when executed as a script (not on import).
if __name__ == "__main__":
    main()
    

    



