# coding=utf8
'''
Created on 2015-3-14

@author: xizhzhao
'''
from core.JsonDataFetcher import *
import string
import abc

class SougouPageFetcher(JsonDataFetcher):
    """Fetcher for Sogou job-search JSONP result pages (Python 2).

    Strips the JSONP wrapper, parses the job list out of the JSON payload,
    appends one CSV row per job to a daily dump file, and enqueues both the
    individual job URLs and the next result page onto the shared
    ``urls_to_visit`` queue (guarded by ``self.__mutex__``).
    """

    def init(self, url, intent=False, recurse=False):
        # Delegate to the base initializer; Sogou pages always force the
        # third positional flag to True regardless of the caller's args.
        return JsonDataFetcher.init(self, url, True)

    def preProcess(self):
        # Strip the JSONP wrapper: the leading 'call_quan(' (10 chars)
        # and the trailing ');' (2 chars).
        self.__data_src__ = self.__data_src__[10:len(self.__data_src__) - 2]

    def analysis(self):
        """Parse the fetched JSON, enqueue job / next-page URLs and append
        the extracted job rows to the daily CSV file.

        Reads the shared globals ``seen_urls`` and ``urls_to_visit``;
        every access to them is serialized via ``self.__mutex__``.
        """
        self.__encoding__ = 'utf8'
        if not self.__json_data__:
            return
        global seen_urls, urls_to_visit
        rstLst = self.__json_data__['result'][0]['srrs']
        if not rstLst:
            return

        # Loop-invariant configuration reads, hoisted out of the per-job loop.
        self.__template_path__ = Configurator.get_val('page_template_path')
        blacklist = Configurator.get_val('blacklist')

        job_infos = []
        for jm in rstLst:
            # Sogou wraps search-hit highlights in [em]...[/em]; drop them.
            job_title = jm['title'].replace('[em]', '').encode(self.__encoding__)
            job_title = job_title.replace('[/em]', '')
            job_url = jm['joburl'].encode(self.__encoding__)

            job_edu = jm['education'].encode(self.__encoding__)
            job_expr = jm['experience'].encode(self.__encoding__)
            job_slry = jm['salary'].encode(self.__encoding__)

            job_com = jm['employer'].replace('[em]', '').encode(self.__encoding__)
            job_com = job_com.replace('[/em]', '')

            job_loc = jm['city'].encode(self.__encoding__)
            job_post_date = jm['modifyDate'].encode(self.__encoding__)
            job_site = unicode(jm['source'])
            job_site_url = jm['sourcelink'].encode(self.__encoding__)

            # Make sure an (empty) page template file exists for this
            # source site so a later stage can fill it in.
            netloc = job_site
            if netloc is not None:
                template_file = "%s/%s.xml" % (self.__template_path__, netloc)
                if not os.path.exists(template_file):
                    logger.warning('No template exists for %s, create one' % (netloc))
                    open(template_file, 'w').close()

            # One CSV row per job.  The final '%s' filled with '\n' keeps
            # the original "...,\n" row ending byte-for-byte.
            ll = "%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s" % (job_title, job_url, job_edu,
                                                       job_expr, job_slry, job_com,
                                                       job_loc, job_post_date, job_site,
                                                       job_site_url, '\n')

            # Enqueue the job URL as the tuple (url, job_site) unless the
            # site is black-listed.  try/finally guarantees the mutex is
            # released even if the append raises (the original could
            # deadlock the whole crawler on an exception here).
            if self.__mutex__.acquire(1):
                try:
                    if blacklist is None or job_site.encode('utf8') not in blacklist.split(','):
                        logger.info("append %s to urls_to_visit" % (job_url))
                        urls_to_visit.append((job_url, job_site))
                finally:
                    self.__mutex__.release()

            job_infos.append(ll)

        logger.info("append %s to seen_urls and remove from urls_to_visit" % (self.__url__))

        if self.__mutex__.acquire(1):
            try:
                seen_urls.append(self.__url__)  # mark this page as processed
            finally:
                self.__mutex__.release()

        # Build the next page URL: bump the numeric value of the third
        # '&'-separated query item (the 'page=N' parameter).
        items = self.__url__.split('&')
        pn = int(items[2].split('=')[1]) + 1

        # Respect the configured crawl-depth limit.
        if pn > int(Configurator.get_val('crawl_page_num_limit')):
            return

        next_url = "%s&%s&page=%s" % (items[0], items[1], pn)

        # Single critical section for both membership checks (the two
        # separate acquisitions in the original were not atomic anyway).
        if self.__mutex__.acquire(1):
            try:
                seen = next_url in seen_urls
                visited = next_url in urls_to_visit
            finally:
                self.__mutex__.release()

        if not seen and not visited:
            # Log only when we actually enqueue (the original logged the
            # append unconditionally, which was misleading).
            logger.info("append %s to urls_to_visit" % (next_url))
            if self.__mutex__.acquire(1):
                try:
                    urls_to_visit.append({'url': next_url, 'controller': 'SougouPageFetcher'})
                finally:
                    self.__mutex__.release()

        # Append today's job rows to the daily CSV dump; finally guarantees
        # the handle is closed even if a write fails.
        import time
        csv_path = "%s/sougou_search_%s.csv" % (
            self.__save_path__,
            time.strftime('%Y-%m-%d', time.localtime(time.time())))
        f = open(csv_path, 'a')
        try:
            f.writelines(job_infos)
        finally:
            f.close()
