__author__ = 'Soul'


from core.JsonDataFetcher import *
import string

class ZhaopinBaiduPageFetcher(JsonDataFetcher):
    """Fetcher/parser for Baidu zhaopin (job-search) JSON result pages.

    For each fetched page this class extracts the job records, appends
    them as CSV lines to a per-day output file, pushes every job-detail
    URL (and the next result-page URL) onto the shared ``urls_to_visit``
    queue, and records the current page URL in the shared ``seen_urls``
    list.  Access to both shared lists is serialized via
    ``self.__mutex__`` (provided by the base class).
    """

    def init(self, url, intent=False, recurse=False):
        """Initialise via JsonDataFetcher.init, always forcing JSON mode.

        ``intent`` and ``recurse`` are accepted for interface
        compatibility but not forwarded (the original behaviour).
        """
        return JsonDataFetcher.init(self, url, True)

    def preProcess(self):
        """No pre-processing is required for this source."""
        return

    def _encoded_field(self, record, key):
        """Return record[key] encoded with self.__encoding__, or ''.

        Returns '' when the value is not a string (AttributeError) or
        cannot be encoded (UnicodeEncodeError).  A missing key
        (KeyError) is deliberately NOT caught here: the caller skips the
        whole record in that case, matching the original behaviour.
        """
        try:
            return record[key].encode(self.__encoding__)
        except (AttributeError, UnicodeEncodeError):
            return ""

    def _ensure_template(self, netloc):
        """Create an empty page-template XML file for netloc if missing."""
        self.__template_path__ = Configurator.get_val('page_template_path')
        path = "%s/%s.xml" % (self.__template_path__, netloc)
        if netloc is not None and not os.path.exists(path):
            logger.warning('No template exists for %s, create one' % (netloc))
            # Touch an empty placeholder file and close it immediately.
            open(path, 'w').close()

    def _enqueue_job(self, job_url, job_site):
        """Append (job_url, job_site) to urls_to_visit unless blacklisted.

        NOTE(fix): the original condition was
        ``blacklist == None or not_blacklisted and job_url`` which, by
        operator precedence, enqueued empty URLs whenever no blacklist
        was configured.  The intended grouping is applied here.
        """
        blacklist = Configurator.get_val('blacklist')
        if self.__mutex__.acquire(1):
            try:
                permitted = (blacklist is None or
                             job_site.encode('utf8') not in blacklist.split(','))
                if permitted and job_url:
                    logger.info("append %s to urls_to_visit" % (job_url))
                    urls_to_visit.append((job_url, job_site))
            finally:
                self.__mutex__.release()

    def _parse_record(self, jm):
        """Parse one JSON job record into a CSV line.

        Returns (csv_line, job_url, job_site).  Raises KeyError if a
        mandatory field is absent, which skips the record.
        """
        job_title = self._encoded_field(jm, 'title')
        job_url = self._encoded_field(jm, 'url')
        job_edu = self._encoded_field(jm, 'education')
        job_expr = self._encoded_field(jm, 'ori_experience')
        job_slry = self._encoded_field(jm, 'salary')
        job_com = self._encoded_field(jm, 'commonname')
        job_loc = self._encoded_field(jm, 'city')
        job_post_date = self._encoded_field(jm, '_update_time')

        # 'source' is kept as a unicode string (it names the template
        # file and is matched against the blacklist), not byte-encoded
        # like the other fields.
        job_site = ""
        try:
            job_site = unicode(jm['source'])
        except (AttributeError, UnicodeEncodeError):
            pass

        job_site_url = self._encoded_field(jm, 'sourcelink')

        self._ensure_template(job_site)

        # CSV row; the trailing '%s' slot carries the newline, so every
        # line ends with ",\n" exactly as the original output did.
        ll = "%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s" % (job_title, job_url, job_edu,
                                                   job_expr, job_slry, job_com,
                                                   job_loc, job_post_date, job_site,
                                                   job_site_url, '\n')
        return ll, job_url, job_site

    def _enqueue_next_page(self):
        """Compute the next result-page URL and enqueue it if unseen.

        The current URL is expected to carry '&rn=<page size>&pn=<offset>'
        as its 4th and 5th '&'-separated components (0-indexed 3 and 4).
        """
        items = self.__url__.split('&')
        rn = int(items[3].split('=')[1])
        pn = int(items[4].split('=')[1])
        # Floor division pins the Python 2 integer '/' semantics.
        current_page = pn // rn + 1

        # Honour the configured crawl-depth limit.
        if current_page > int(Configurator.get_val('crawl_page_num_limit')):
            return

        pn = current_page * rn
        next_url = "%s&%s&%s&%s&pn=%s" % (items[0], items[1], items[2],
                                          items[3], pn)

        # Default to "already known" so a failed lock acquisition can
        # never enqueue a duplicate (the original risked an
        # UnboundLocalError on that path).
        known = True
        if self.__mutex__.acquire(1):
            try:
                known = (next_url in seen_urls) or (next_url in urls_to_visit)
            finally:
                self.__mutex__.release()

        logger.info("append %s to urls_to_visit" % (next_url))
        if not known:
            if self.__mutex__.acquire(1):
                try:
                    urls_to_visit.append({'url': next_url,
                                          'controller': 'ZhaopinBaiduPageFetcher'})
                finally:
                    self.__mutex__.release()

    def analysis(self):
        """Parse the fetched JSON payload and act on it.

        Side effects:
          * appends one CSV line per job to zhaopinbaidu_search_<date>.csv
          * pushes job-detail URLs and the next result-page URL onto the
            shared ``urls_to_visit`` queue
          * records this page's URL in the shared ``seen_urls`` list

        NOTE(fix): the original returned early when the page limit was
        reached, BEFORE writing the already-parsed rows to the CSV file,
        silently dropping the final page's data.  The write now always
        happens.
        """
        self.__encoding__ = 'utf8'
        if not self.__json_data__:
            return
        rstLst = self.__json_data__['data']['data']['disp_data']
        if not rstLst:
            return

        job_infos = []
        for jm in rstLst:
            try:
                ll, job_url, job_site = self._parse_record(jm)
                self._enqueue_job(job_url, job_site)
                job_infos.append(ll)
            except Exception:
                # A malformed record (e.g. missing key) skips only that
                # record; log with traceback instead of a bare print so
                # SystemExit/KeyboardInterrupt are no longer swallowed.
                logger.exception("failed to parse job record, skipping")
                continue

        logger.info("append %s to seen_urls and remove from urls_to_visit" % (self.__url__))
        if self.__mutex__.acquire(1):
            try:
                seen_urls.append(self.__url__)  # add it to the seen list
            finally:
                self.__mutex__.release()

        self._enqueue_next_page()

        # Append the collected rows to today's CSV file.
        import time
        csv_path = ("%s/zhaopinbaidu_search_%s.csv"
                    % (self.__save_path__,
                       time.strftime('%Y-%m-%d', time.localtime(time.time()))))
        f = open(csv_path, 'a')
        try:
            for info in job_infos:
                f.write(info)
        finally:
            f.close()