'''
Created on Nov 14, 2015

@author: eyaomai
'''
import sys
sys.path.append(sys.argv[0][:sys.argv[0].rfind('com/stocklens')])
import traceback
from bs4 import BeautifulSoup
from com.stocklens.stock.common.utils import Logging,  PIDUtils 
from com.stocklens.stock.data.crawlercommon import CrawlerConstants, CrawlerManager, CrawlerBase

class JdCommentCrawlerManager(CrawlerManager):
    """Manager that schedules JD comment-crawl tasks.

    Loads item ids for one category from the ``jditem`` table and hands
    each id out as a :class:`JdCommentCrawler` task.
    """
    LOGGER_NAME_CRAWL = 'jdc'
    CONFIG_FILE_TOTALPAGE = 'totalpage'
    # NOTE(review): historical typo ("CAGEGORY") kept so existing callers
    # keep working; prefer the correctly spelled alias below in new code.
    CONFIG_FILE_CAGEGORYID = 'categoryid'
    CONFIG_FILE_CATEGORYID = CONFIG_FILE_CAGEGORYID  # correctly spelled alias

    def __init__(self, json_config_file):
        """Build the manager from a JSON config file path.

        :param json_config_file: path to the crawler JSON configuration.
        """
        # 0.1 / None are the base-class sleep factor and extra arg used by
        # the original code — TODO confirm their meaning against CrawlerManager.
        super(JdCommentCrawlerManager, self).__init__(json_config_file, 0.1, None)
        self.logger = Logging.getLogger(JdCommentCrawlerManager.LOGGER_NAME_CRAWL)

    def _getNonCommonConfig(self, config):
        """Extract crawler-specific settings from the parsed config dict."""
        self.__categoryid = config[JdCommentCrawlerManager.CONFIG_FILE_CAGEGORYID]
        self.__totalpage = config[JdCommentCrawlerManager.CONFIG_FILE_TOTALPAGE]

    def _generateTask(self, task, checkTaskList=True):
        """Populate *task* with the next item id; return False when exhausted.

        Bug fix: forward ``checkTaskList`` to the base class instead of
        hard-coding ``True`` — the parameter was previously ignored.  The
        default value preserves the old behavior for existing callers.
        """
        if super(JdCommentCrawlerManager, self)._generateTask(task, checkTaskList) is False:
            return False
        # The base class returned True, so the task list is non-empty here.
        itemid = self._taskList.pop(0)
        task[CrawlerConstants.PARA_CLASS] = JdCommentCrawler
        task[JdCommentCrawler.PARA_ITEMID] = itemid
        task[JdCommentCrawler.PARA_TOTALPAGE] = self.__totalpage

        return True

    def _initTask(self):
        """Fill the task list with all item ids of the configured category."""
        # NOTE(review): string-built SQL.  categoryid comes from the local
        # config file rather than user input, but switch to a parameterized
        # query if dbProxy supports it.
        sql = 'SELECT ITEMID FROM jditem WHERE CATEGORYID ="%s"' % self.__categoryid
        if self.dbProxy.execute(sql) > 0:
            self._taskList.extend([x[0] for x in self.dbProxy.cur.fetchall()])

class JdCommentCrawler(CrawlerBase):
    """Crawls every review page of one JD item and stores the comments.

    Review URL pattern: http://club.jd.com/review/10178500-1-2-2.html,
    i.e. <itemid>-1-<page>-<ratecategory>.
    """
    URL = 'http://club.jd.com/review/%s-1-%d-%d.html'
    PARA_TOTALPAGE = 'totalpage'
    PARA_ITEMID = 'itemid'

    def __init__(self, controller, dbProxy, request):
        """Create a crawler for the item described by *request*.

        :param controller: owning manager; provides logger and randomSleep().
        :param dbProxy: database proxy used to persist parsed comments.
        :param request: task dict carrying PARA_ITEMID and PARA_TOTALPAGE.
        """
        super(JdCommentCrawler, self).__init__(controller, dbProxy, request)
        self.__totalpage = request[JdCommentCrawler.PARA_TOTALPAGE]
        self.__itemid = request[JdCommentCrawler.PARA_ITEMID]
        self.logger = controller.logger

    def run(self):
        """Fetch all pages of rate categories 1..3 and report final status."""
        super(JdCommentCrawler, self).run()
        status = CrawlerConstants.VAL_STATUS_FINISH
        for ratecategory in range(1, 4):
            for page in range(1, self.__totalpage + 1):
                url = JdCommentCrawler.URL % (self.__itemid, page, ratecategory)
                self.logger.info('Start Crawl %s', url)
                content = self._fetchContent(url)
                if content is None:
                    status = CrawlerConstants.VAL_STATUS_FAILURE
                else:
                    status = self.__parse(content, url, ratecategory)
                    if status == CrawlerConstants.VAL_STATUS_NOMORE:
                        # Empty page: no more reviews in this rate category,
                        # skip its remaining pages.
                        break
                # Throttle between requests to avoid hammering the site.
                self.controller.randomSleep()
        self._reportDone(status)
        self.logger.info('Finish Crawl')

    def __parse(self, content, url, ratecategory):
        """Parse one review page and bulk-insert its comments.

        :returns: CrawlerConstants.VAL_STATUS_FINISH on success,
            VAL_STATUS_NOMORE when the page holds no review items, and
            VAL_STATUS_FAILURE on any parse/DB error.
        """
        try:
            soup = BeautifulSoup(content)
            # BUG FIX: the attrs argument must be a dict.  The original
            # passed the *set* {'class', 'i-item'}, which makes bs4 raise
            # (sets have no .items()) so every page fell into the except
            # branch and was reported as a failure.
            items = soup.findAll('div', {'class': 'i-item'})
            if not items:
                return CrawlerConstants.VAL_STATUS_NOMORE
            sql = 'INSERT INTO jdcomment (itemid, commentid, commentdate, commentstr, star, ratecategory) values '
            values = list()
            for item in items:
                spans = item.findAll('span')
                # Star rating is encoded in the last character of the
                # second CSS class of the first span — TODO confirm against
                # current page markup.
                star = int(spans[0].get('class')[1][-1])
                commentdate = spans[1].text
                commentstr = item.find('div', {'class': 'comment-content'}).text.strip().strip('\r').strip('\n').strip('\t').replace('"', '\\"')
                # The comment id is the file-name part of the first link,
                # e.g. ".../<commentid>.html".
                ahref = item.find('a')
                link = ahref.get('href')
                lindex = link.rfind('/')
                rindex = link.rfind('.')
                commentid = link[lindex + 1:rindex]

                # NOTE(review): string-built SQL with only double quotes
                # escaped — scraped text containing backslashes can still
                # break the statement; parameterize if dbProxy supports it.
                values.append('("%s","%s","%s","%s",%s, %d)' % (self.__itemid, commentid, commentdate, commentstr, star, ratecategory))

            if values:
                if self.dbProxy.execute(sql + ','.join(values)) > 0:
                    self.totalNum += len(values)
                    self.dbProxy.commit()
            return CrawlerConstants.VAL_STATUS_FINISH
        except Exception:
            traceInfo = traceback.format_exc()
            self.logger.warn('Fail to parse:%s:%s', url, traceInfo)
            return CrawlerConstants.VAL_STATUS_FAILURE
if __name__ == '__main__':
    # Single-instance guard: refuse to start while a previous JDC crawler
    # is still running (its pid file exists).
    if PIDUtils.isPidFileExist('jdc'):
        # Parenthesized print is valid in both Python 2 and Python 3.
        print('Previous JDC (JD comment) Crawler process is on-going, please stop it firstly')
        sys.exit(1)
    import os
    # Record our pid so the next invocation (and the watcher) can find us.
    PIDUtils.writePid('jdc', os.getpid())
    Logging.initLogger('conf/crawler/crawler.logging.cfg')
    jdc = JdCommentCrawlerManager('conf/crawler/jdc.cfg')
    jdc.start()
    # Watcher that shuts the manager down when the pid file disappears —
    # polling every 5 seconds, presumably; confirm against PIDUtils.
    pidutils = PIDUtils('jdc', jdc.shutDown, 5, jdc.logger)
    pidutils.start()
    sys.exit(0)
