'''
Created on 2014-5-10

@author: xizhzhao
'''
from lxml.html import *
import datetime
from core.PageFetcher import *
from xparser.TemplateParser import *

class QcwyPageFetcher(PageFetcher):
    """Page fetcher for 51job (qiancheng-wuyou) search-result listings.

    ``analysis`` parses a search_result.php page, appends one CSV summary
    line per job row, queues each job-detail URL for crawling, and
    enqueues the next results page until the configured page limit.
    Shares ``seen_urls`` / ``urls_to_visit`` with other fetchers under
    ``self.__mutex__``.
    """

    def init(self, url, intent=False, recurse=False):
        """Initialise the fetcher for a 51job search-result URL.

        Example URL:
        http://search.51job.com/jobsearch/search_result.php?fromJs=0&keyword=%E7%94%B5%E5%8A%A8%E6%B1%BD%E8%BD%A6&curr_page=2&ord_field=1

        Known query parameters (for reference):
            list_type=0/1
            ord_field=0  -- order by update_date
            ord_field=1  -- order by similarity
            keywordtype=1

        ``intent`` and ``recurse`` are accepted for interface
        compatibility but ignored: the base class is always initialised
        with intent=True.
        """
        return PageFetcher.init(self, url, True)

    def analysis(self):
        """Parse the fetched page, record jobs, and queue follow-up URLs.

        Side effects: appends to the shared ``urls_to_visit`` /
        ``seen_urls`` lists (under ``self.__mutex__``), may create an
        empty page template file, and appends summary lines to a daily
        ``51Job_search_YYYY-MM-DD.csv`` under ``self.__save_path__``.
        """
        if not self.__html_src__:
            return
        global seen_urls, urls_to_visit
        if not self.__recurse__:
            return

        parsed_html = fromstring(self.__html_src__)
        job_trs = parsed_html.xpath("//div[@id='resultList']/div[@class='el']")
        # Fewer than 3 result rows: treat the listing as exhausted.
        if len(job_trs) < 3:
            return  # here we exit

        job_infos = []
        # XPath positions are 1-based; starting at 1 and stopping before
        # len(job_trs) skips the div at position len(job_trs).
        # NOTE(review): presumably one boundary row is a header/footer --
        # confirm against a live page before changing the range.
        for i in range(1, len(job_trs)):
            base_path = "//div[@id='resultList']/div[@class='el'][%s]" % (str(i))
            # NOTE(review): job_title is the raw xpath() result (a list),
            # so the CSV cell reads like "['title']". Kept as-is to
            # preserve the existing CSV format; confirm before adding [0].
            job_title = parsed_html.xpath(base_path + "/p[@class='t1 ']/span/a/@title")

            job_url = parsed_html.xpath(base_path + "/p[@class='t1 ']/span/a/@href")[0]
            job_com = parsed_html.xpath(base_path + "/span[@class='t2']/a")[0].text_content()
            job_loc = parsed_html.xpath(base_path + "/span[@class='t3']/text()")[0]
            job_post_date = parsed_html.xpath(base_path + "/span[@class='t5']/text()")[0]

            job_site = '51Job'

            # Create an empty template file if no template exists yet.
            self.__template_path__ = Configurator.get_val('page_template_path')
            netloc = job_site
            template_file = "%s/%s.xml" % (self.__template_path__, netloc)
            if netloc is not None and not os.path.exists(template_file):
                logger.warning('No template exists for %s, create one' % (netloc))
                # 'with' guarantees the handle is closed (was a bare
                # open()/close() pair that leaked on error).
                with open(template_file, 'w'):
                    pass

            ll = "%s,%s,%s,%s,%s,%s,%s,%s" % (job_title, job_url, job_com,
                                              job_loc, job_post_date, job_site,
                                              'http://www.51job.com/', '\n')

            # Queue the detail URL; the shared queue stores (url, job_site).
            if self.__mutex__.acquire(1):
                logger.info("append %s to urls_to_visit" % (job_url))
                urls_to_visit.append((job_url, job_site))
                self.__mutex__.release()
            job_infos.append(ll)

        logger.info("append %s to seen_urls and remove from urls_to_visit" % (self.__url__))

        if self.__mutex__.acquire(1):
            seen_urls.append(self.__url__)  # add it to the seen list
            self.__mutex__.release()

        # Build the next page URL by bumping curr_page (3rd query param
        # of the '&'-separated URL -- see the example URL in init()).
        items = self.__url__.split('&')
        # int() replaces Python-2-only string.atoi(); identical for
        # decimal strings.
        pn = int(items[2].split('=')[1]) + 1

        # Add page limit for fetchers.
        if pn > int(Configurator.get_val('crawl_page_num_limit')):
            return

        next_url = "%s&%s&%s&%s" % (items[0], items[1], 'curr_page=%s' % (pn), items[3])
        logger.info("append %s to urls_to_visit" % (next_url))

        # BUG FIX: seen/visited were unbound (NameError) if acquire()
        # ever returned False; default to True so an unchecked URL is
        # never enqueued.
        seen = visited = True
        if self.__mutex__.acquire(1):
            seen = next_url in seen_urls
            self.__mutex__.release()

        if self.__mutex__.acquire(1):
            visited = next_url in urls_to_visit
            self.__mutex__.release()

        if (not seen) and (not visited):
            if self.__mutex__.acquire(1):
                urls_to_visit.append(next_url)
                self.__mutex__.release()

        # Append this batch of summary lines to today's CSV file.
        import time
        csv_path = "%s/51Job_search_%s.csv" % (
            self.__save_path__,
            time.strftime('%Y-%m-%d', time.localtime(time.time())))
        with open(csv_path, 'a') as f:
            for info in job_infos:
                f.write(info)