'''
Created on 2014-07-12

@author: Administrator
'''
import datetime
from core.PageFetcher import *

class IndeedPageFetcher(PageFetcher):
    '''
    Page fetcher for cn.indeed.com search-result listings.

    ``analysis`` parses every job row out of the fetched listing HTML,
    resolves Indeed's redirect-style job links to their final URLs via an
    HTTP proxy, queues each job URL (and the next results page) for
    crawling, and appends the scraped rows to a daily CSV file.
    '''

    def init(self, url, intent=False):
        # Indeed listing pages are always crawled recursively, so the
        # caller-supplied ``intent`` flag is deliberately overridden with True.
        return PageFetcher.init(self, url, True)

    @staticmethod
    def _first_text(tree, xpath_expr):
        '''Return text_content() of the first node matching *xpath_expr*, or ''.

        BUGFIX: the original required ``len(nodes) > 1`` before reading
        ``nodes[0]``, which threw away every field that matched exactly once.
        '''
        nodes = tree.xpath(xpath_expr)
        return nodes[0].text_content() if nodes else ''

    @staticmethod
    def _first_raw(tree, xpath_expr):
        '''Return the first raw string result of *xpath_expr* (an ``@attr``
        or ``text()`` expression), or '' when nothing matches.'''
        nodes = tree.xpath(xpath_expr)
        return nodes[0] if nodes else ''

    def _ensure_template(self, netloc):
        '''Create an empty page-template XML file for *netloc* if one is missing.

        BUGFIX: the original tested ``not(netloc == None)``, but netloc is
        always a str here (possibly ''), so empty site names produced a
        bogus ``.xml`` file; test truthiness instead.
        '''
        self.__template_path__ = Configurator.get_val('page_template_path')
        if netloc and not os.path.exists("%s/%s.xml" % (self.__template_path__, netloc)):
            logger.warning('No template exists for %s, create one' % (netloc))
            open("%s/%s.xml" % (self.__template_path__, netloc), 'w').close()

    def _resolve_job_url(self, origin_job_url):
        '''Follow Indeed's redirect link through the proxy and return the
        final job URL from the 302 Location header, or '' on any other status.

        NOTE(review): the proxy host 119.9.65.186 is hard-coded — presumably
        a crawling proxy; consider moving it into Configurator.
        '''
        import httplib  # Python 2 stdlib; imported lazily as in the original
        conn = httplib.HTTPConnection('119.9.65.186', 80)
        try:
            conn.request('GET',
                         'http://cn.indeed.com' + origin_job_url,
                         None,
                         {'host': 'cn.indeed.com',
                          'user-agent': 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.114 Safari/537.36',
                          'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8'
                          })
            result = conn.getresponse()
            if 302 != result.status:
                return ''
            return result.getheader("Location")
        finally:
            # BUGFIX: the original leaked the connection on non-302 responses.
            conn.close()

    def _enqueue_job(self, job_url, job_site):
        '''Append (job_url, job_site) to the shared urls_to_visit queue,
        unless job_site is on the configured blacklist.'''
        blacklist = Configurator.get_val('blacklist')
        if self.__mutex__.acquire(1):
            try:
                if blacklist is None or job_site.encode('utf8') not in blacklist.split(','):
                    logger.info("append %s to urls_to_visit" % (job_url))
                    urls_to_visit.append((job_url, job_site))
            finally:
                self.__mutex__.release()

    def _write_results(self, job_infos):
        '''Append the scraped CSV rows to today's output file.'''
        import time
        # NOTE(review): 'indeeed' spelling kept on purpose — existing output
        # files already use it; renaming would orphan them.
        path = ("%s/indeeed_search_%s.csv"
                % (self.__save_path__,
                   time.strftime('%Y-%m-%d', time.localtime(time.time()))))
        with open(path, 'a') as f:
            for info in job_infos:
                f.write(info)

    def _queue_next_page(self):
        '''Derive the next results-page URL from self.__url__ and queue it.

        Stops at the configured page limit and at Indeed's start=990 cap.
        '''
        items = self.__url__.split('&')
        # URL shape assumed: <base>&start=<offset> — TODO confirm callers
        # always build the URL this way.
        pn = int((items[1]).split('=')[1])
        current_page = pn // 10
        if current_page > int(Configurator.get_val('crawl_page_num_limit')):
            return
        pn += 10
        if pn > 990:
            return
        next_url = ("%s&%s") % (items[0], ('start=%s') % (pn))
        logger.info("append %s to urls_to_visit" % (next_url))
        # BUGFIX: the original checked seen_urls and urls_to_visit under two
        # separate lock acquisitions and appended under a third, allowing a
        # racing thread to enqueue the same URL twice. Do the whole
        # check-then-append as one critical section.
        if self.__mutex__.acquire(1):
            try:
                if next_url not in seen_urls and next_url not in urls_to_visit:
                    urls_to_visit.append(next_url)
            finally:
                self.__mutex__.release()

    def analysis(self):
        '''Parse the fetched listing page.

        Extracts every job row, resolves each job's final URL, enqueues the
        job URLs and the next listing page, and persists the rows to CSV.
        No-op when no HTML was fetched or recursion is disabled.
        '''
        if not self.__html_src__:
            return
        if not self.__recurse__:
            return

        from lxml.html import fromstring
        parsed_html = fromstring(self.__html_src__.encode('UTF-8'))
        job_trs = parsed_html.xpath("//td[@id='resultsCol']//div[@class='  row  result']")
        if not job_trs:
            print('return')
            return  # nothing to scrape on this page

        job_infos = []
        # XPath positional predicates are 1-based, so iterate 1..len
        # inclusive. BUGFIX: the original ``range(1, len(job_trs), 1)``
        # silently dropped the last result of every page.
        for i in range(1, len(job_trs) + 1):
            base_path = ("//td[@id='resultsCol']//div[@class='  row  result'][%s]") % (i)

            job_title = self._first_text(parsed_html, base_path + "/h2[@class='jobtitle']/a")
            job_title = ''.join(job_title.split())  # collapse all whitespace

            origin_job_url = self._first_raw(parsed_html, base_path + "/h2[@class='jobtitle']/a/@href")
            job_com = self._first_text(parsed_html, base_path + "/span[@class='company']/span[@itemprop='name']")
            job_loc = self._first_text(parsed_html, base_path + "/span[@itemprop='jobLocation']/span[@class='location']/span[@itemprop='addressLocality']")
            job_post_date = self._first_raw(parsed_html, base_path + "/table/tr/td[@class='snip']//span[@class='date']/text()")
            job_site = self._first_raw(parsed_html, base_path + "/table/tr/td[@class='snip']//span[@class='sdn']/text()")

            # Create an empty template file if no template exists yet.
            self._ensure_template(job_site)

            # Resolve the redirect link to the real job URL; skip rows the
            # proxy cannot resolve (same behavior as the original continue).
            job_url = self._resolve_job_url(origin_job_url)
            if not job_url:
                continue

            ll = "%s,%s,%s,%s,%s,%s,%s,%s" % (job_title, job_url, job_com,
                                              job_loc, job_post_date, job_site,
                                              'http://cn.indeed.com/', '\n')
            # Queue the job URL as a (url, job_site) pair, honoring blacklist.
            self._enqueue_job(job_url, job_site)
            job_infos.append(ll)

        logger.info("append %s to seen_urls and remove from urls_to_visit" % (self.__url__))
        if self.__mutex__.acquire(1):
            try:
                seen_urls.append(self.__url__)  # mark this page as seen
            finally:
                self.__mutex__.release()

        # BUGFIX: write results BEFORE the pagination/page-limit logic — the
        # original returned early on the page limit and discarded the final
        # page's scraped rows without ever writing them.
        self._write_results(job_infos)

        # Queue the next results page, subject to the configured page limit.
        self._queue_next_page()