# coding=utf8
'''
Created on 2014-07-12

@author: Administrator
'''

import datetime
from core.PageFetcher import *

class JobUiPageFetcher(PageFetcher):
    '''
    Fetcher/parser for jobui.com search-result pages.

    ``analysis`` extracts every job entry from the recommended-job list,
    pushes each job detail URL onto the shared ``urls_to_visit`` queue,
    marks the current page as seen, schedules the next result page (subject
    to the configured page limit) and appends the scraped rows to a dated
    CSV file under ``self.__save_path__``.
    '''

    def init(self, url, intent=False):
        # jobui result pages must always be crawled recursively, so the
        # caller-supplied ``intent`` flag is deliberately overridden.
        return PageFetcher.init(self, url, True)

    def analysis(self):
        '''Parse the fetched HTML and harvest job links plus CSV rows.

        Uses the module-level ``seen_urls`` / ``urls_to_visit`` queues that
        are shared with the other fetcher threads; every queue mutation is
        performed while holding ``self.__mutex__``.
        '''
        if not self.__html_src__:
            return
        global seen_urls, urls_to_visit
        if not self.__recurse__:
            return

        from lxml.html import fromstring
        parsed_html = fromstring(self.__html_src__)
        job_items = parsed_html.xpath("//ul[@class='searcher-job-detail j-recommendJob']/li")
        if not job_items:
            logger.info('no job entries found on %s, stop recursing' % (self.__url__))
            return

        job_infos = self._extract_jobs(parsed_html, len(job_items))

        logger.info("append %s to seen_urls and remove from urls_to_visit" % (self.__url__))
        self.__mutex__.acquire()
        try:
            seen_urls.append(self.__url__)  # mark the current page as done
        finally:
            self.__mutex__.release()

        self._enqueue_next_page()
        # NOTE(fix): the original returned early on the page-number limit
        # *before* this write, discarding every row scraped on that page.
        self._save_rows(job_infos)

    def _extract_jobs(self, parsed_html, job_count):
        '''Build one CSV row per job entry and enqueue the job URLs.

        Returns the list of newline-terminated CSV row strings.
        '''
        job_infos = []
        blacklist = Configurator.get_val('blacklist')
        # XPath positions are 1-based and inclusive: iterate 1..job_count.
        # The original range(1, job_count) silently dropped the last <li>.
        for i in range(1, job_count + 1):
            base_path = ("//ul[@class='searcher-job-detail j-recommendJob']/li[%s]") % (i)
            title_hits = parsed_html.xpath(base_path + "/div[@class='cfix']/div[@class='fr']/div[@class='cfix']/h2[@class='fl']/a/@title")
            href_hits = parsed_html.xpath(base_path + "/div[@class='cfix']/div[@class='fr']/div[@class='cfix']/h2[@class='fl']/a/@href")
            if not title_hits or not href_hits:
                # malformed entry: skip it instead of raising IndexError
                logger.warning('skip malformed job entry %s on %s' % (i, self.__url__))
                continue
            job_title = title_hits[0]
            # the listing links are site-relative; some go through the
            # /tips/redirect.php redirector and are kept as-is on purpose
            job_url = "http://www.jobui.com" + href_hits[0]

            # take the first match (the originals stored the whole xpath
            # result list, so the CSV contained "['...']" reprs)
            com_hits = parsed_html.xpath(base_path + "/div[@class='cfix']/div[@class='fr']//h2/a/@title")
            job_com = com_hits[0] if com_hits else ''
            job_site = 'jobui'
            date_hits = parsed_html.xpath(base_path + "/div[@class='cfix']/div[@class='fr']/div[@class='cfix']/span[@class='fr']/text()")
            job_post_date = date_hits[0] if date_hits else ''

            # create an empty template file if none exists for this site
            self.__template_path__ = Configurator.get_val('page_template_path')
            netloc = job_site
            template_file = ("%s/%s.xml") % (self.__template_path__, netloc)
            if (netloc is not None) and not os.path.exists(template_file):
                logger.warning('No template exists for %s, create one' % (netloc))
                open(template_file, 'w').close()

            # last %s is the newline; it leaves a trailing empty column
            # where the removed job_loc field used to be -- kept so the
            # CSV layout stays identical for downstream consumers
            row = "%s,%s,%s,%s,%s,%s,%s" % (job_title, job_url, job_com,
                                            job_post_date, job_site,
                                            'http://www.jobui.com/', '\n')

            # enqueue (url, site) pairs, skipping blacklisted sites
            self.__mutex__.acquire()
            try:
                if blacklist is None or job_site.encode('utf8') not in blacklist.split(','):
                    logger.info("append %s to urls_to_visit" % (job_url))
                    urls_to_visit.append((job_url, job_site))
            finally:
                self.__mutex__.release()
            job_infos.append(row)
        return job_infos

    def _enqueue_next_page(self):
        '''Build the next result-page URL from self.__url__ and queue it.

        Assumes the URL's third '&'-separated component is ``n=<page>``
        (jobui pagination) -- TODO confirm against the seed URLs.
        '''
        items = self.__url__.split('&')
        try:
            pn = int(items[2].split('=')[1]) + 1
        except (IndexError, ValueError):
            logger.warning('cannot find a page number in %s' % (self.__url__))
            return
        # honour the configured crawl depth
        if pn > int(Configurator.get_val('crawl_page_num_limit')):
            return
        # presumably jobui serves at most 44 result pages -- TODO confirm
        if pn > 44:
            return
        next_url = ("%s&%s&%s") % (items[0], items[1], ('n=%s') % (pn))
        logger.info("append %s to urls_to_visit" % (next_url))
        # single critical section: the original checked membership and
        # appended under three separate lock acquisitions (a check-then-act
        # race) and left ``seen``/``visited`` unbound if acquire() failed
        self.__mutex__.acquire()
        try:
            if next_url not in seen_urls and next_url not in urls_to_visit:
                urls_to_visit.append(next_url)
        finally:
            self.__mutex__.release()

    def _save_rows(self, job_infos):
        '''Append the scraped rows to today's JobUi CSV file.'''
        import time
        csv_path = ("%s/JobUi_search_%s.csv") % (
            self.__save_path__,
            time.strftime('%Y-%m-%d', time.localtime(time.time())))
        f = open(csv_path, 'a')
        try:
            for info in job_infos:
                f.write(info)
        finally:
            f.close()