
from core.PageFetcher import *
import string

class BaiduPageFetcher(PageFetcher):
    """Fetcher for Baidu job-search result pages.

    ``analysis`` parses the ``job_table`` rows out of the fetched HTML,
    appends each row to a dated CSV file under ``self.__save_path__``, and
    feeds newly discovered job URLs (plus the next result page) into the
    shared ``urls_to_visit`` queue guarded by ``self.__mutex__``.
    """

    def init(self, url, intent=False, recurse=False):
        """Initialise the fetcher for *url* via the base class.

        Baidu search URL query parameters (for reference):
            rn -- number of rows displayed per page
            pn -- (current page number - 1) * rn
            sort_key:  blank = default, 4 = time, 5 = salary
            sort_type: blank = default, 0 = ascending, 1 = descending

        NOTE(review): ``intent`` and ``recurse`` are accepted but ignored;
        ``True`` is always forwarded as the second base-class argument --
        confirm against ``PageFetcher.init``'s signature.
        """
        return PageFetcher.init(self, url, True)

    def analysis(self):
        """Parse ``self.__html_src__``: extract job rows, persist them to
        a daily CSV file, and enqueue job URLs and the next result page.

        Returns ``None``; all results are side effects (CSV file, the
        shared ``urls_to_visit`` / ``seen_urls`` lists).
        """
        # Guard clauses: nothing to do without page source or when this
        # fetcher was not created in recursive (crawl) mode.
        if not self.__html_src__:
            return
        global seen_urls, urls_to_visit
        if not self.__recurse__:
            return

        from lxml.html import fromstring
        parsed_html = fromstring(self.__html_src__)
        job_trs = parsed_html.xpath("//table[@id='job_table']/tr")
        if len(job_trs) == 0:
            return

        job_infos = []
        # XPath positions are 1-based; tr[1] is presumably the header row.
        # NOTE(review): this bound also skips tr[len(job_trs)] -- confirm
        # the last row is a footer/pager and not a real job entry.
        for i in range(2, len(job_trs)):
            base_path = "//table[@id='job_table']/tr[%s]" % (i)

            # Collapse all internal whitespace out of the title text.
            job_title = parsed_html.xpath(base_path + "/td[1]/a")[0].text_content()
            job_title = ''.join(job_title.split())

            job_url = parsed_html.xpath(base_path + "/td[1]/a/@href")[0]

            job_edu = ''.join(parsed_html.xpath(base_path + "/td[1]/span[1]/text()")[0].split())
            job_expr = ''.join(parsed_html.xpath(base_path + "/td[1]/span[2]/text()")[0].split())
            job_slry = ''.join(parsed_html.xpath(base_path + "/td[1]/span[3]/text()")[0].split())

            job_com = parsed_html.xpath(base_path + "/td[2]/text()")[0]
            job_loc = parsed_html.xpath(base_path + "/td[3]/text()")[0]
            job_post_date = parsed_html.xpath(base_path + "/td[4]/text()")[0]
            # Direct-child axis, matching the @href line below (the
            # original mixed "//td[5]" and "/td[5]" for the same cell).
            job_site = parsed_html.xpath(base_path + "/td[5]/a/text()")[0]
            job_site_url = parsed_html.xpath(base_path + "/td[5]/a/@href")[0]

            job_site = unicode(job_site)

            # Create an empty template file if none exists for this site.
            self.__template_path__ = Configurator.get_val('page_template_path')
            netloc = job_site
            template_file = "%s/%s.xml" % (self.__template_path__, netloc)
            if netloc is not None and not os.path.exists(template_file):
                logger.warning('No template exists for %s, create one' % (netloc))
                # Touch: open + immediate close leaves an empty file.
                f = open(template_file, 'w')
                f.close()

            # CSV row; the trailing '%s' with '\n' reproduces the original
            # "...,<newline>" record layout, so keep it as-is.
            ll = "%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s" % (job_title, job_url, job_edu,
                                                       job_expr, job_slry, job_com,
                                                       job_loc, job_post_date, job_site,
                                                       job_site_url, '\n')

            # Enqueue (url, site) unless the site is blacklisted. The
            # release sits in a finally so a raising append cannot leave
            # the mutex held and deadlock the other fetcher threads.
            blacklist = Configurator.get_val('blacklist')
            if self.__mutex__.acquire(1):
                try:
                    if blacklist is None or job_site.encode('utf8') not in blacklist.split(','):
                        logger.info("append %s to urls_to_visit" % (job_url))
                        urls_to_visit.append((job_url, job_site))
                finally:
                    self.__mutex__.release()

            job_infos.append(ll)

        logger.info("append %s to seen_urls and remove from urls_to_visit" % (self.__url__))

        if self.__mutex__.acquire(1):
            try:
                seen_urls.append(self.__url__)  # mark this page as seen
            finally:
                self.__mutex__.release()

        # Persist the collected rows before any early return below -- the
        # original wrote the CSV last, so rows parsed on the page that
        # trips the crawl limit were silently discarded.
        import time
        csv_path = "%s/baidu_search_%s.csv" % (
            self.__save_path__, time.strftime('%Y-%m-%d', time.localtime(time.time())))
        f = open(csv_path, 'a')
        try:
            f.writelines(job_infos)
        finally:
            f.close()

        # Derive the next page URL by bumping the 'pn' (row offset)
        # parameter. URL layout: <base>&<q>&rn=<rows>&pn=<offset>&...
        items = self.__url__.split('&')
        rn = int(items[2].split('=')[1])  # rows per page
        pn = int(items[3].split('=')[1])  # row offset of current page
        current_page = pn // rn + 1       # floor division: same on py2/py3

        # Honour the configured crawl depth limit.
        if current_page > int(Configurator.get_val('crawl_page_num_limit')):
            return
        pn = current_page * rn

        next_url = "%s&%s&rn=%s&pn=%s&%s&%s" % (items[0], items[1], rn, pn,
                                                items[4], items[5])

        # Membership check and append happen under ONE acquisition so a
        # concurrent fetcher cannot enqueue the same URL between the check
        # and the add (the original used two separate lock scopes).
        if self.__mutex__.acquire(1):
            try:
                if next_url not in seen_urls and next_url not in urls_to_visit:
                    logger.info("append %s to urls_to_visit" % (next_url))
                    urls_to_visit.append(next_url)
            finally:
                self.__mutex__.release()
