'''
Created on Apr 7, 2016

@author: eyaomai
'''
import sys
# NOTE(review): imported but never referenced in this file — presumably
# leftover from an IDE auto-import; confirm before removing.
from traitlets.config.application import catch_config_error
# Bootstrap: derive the project root from this script's own path (everything
# before 'com/stocklens') and put it on sys.path so the absolute
# 'com.stocklens.*' imports below resolve when run as a script.
sys.path.append(sys.argv[0][:sys.argv[0].rfind('com/stocklens')])
import traceback
from com.stocklens.stock.common.utils import Logging,  PIDUtils 
from com.stocklens.stock.data.crawlercommon import CrawlerConstants, CrawlerManager, CrawlerBase
from com.stocklens.stock.data.sina.sinacommon import SinaConstants
import json

class EmRzrqCrawlerManager(CrawlerManager):
    """Crawler manager for the East Money margin-trading (rzrq) feed.

    Schedules a single crawl task: fetch the whole SH/SZ margin-trading
    summary through :class:`EmRzrqCrawler`, paging ``pageSize`` rows at a
    time.
    """
    LOGGER_NAME_CRAWL = 'erq'
    CONFIG_FILE_TOTALPAGE = 'totalpage'

    def __init__(self, json_config_file):
        """
        :param json_config_file: path of the crawler JSON config file,
            forwarded verbatim to the base ``CrawlerManager``.
        """
        super(EmRzrqCrawlerManager, self).__init__(json_config_file, 0.1, None)
        self.logger = Logging.getLogger(EmRzrqCrawlerManager.LOGGER_NAME_CRAWL)

    def _initTask(self):
        # Exactly one task: crawl with a page size of 1000 rows per request.
        self._taskList.append(1000)

    def _generateTask(self, task, checkTaskList=True):
        """Fill ``task`` with the page size and the crawler class to run.

        :returns: False when the base class declines to generate a task
            (e.g. the task list is exhausted), True otherwise.
        """
        # BUG FIX: forward the caller's checkTaskList flag; the original
        # passed a hard-coded True, silently ignoring the parameter.
        if super(EmRzrqCrawlerManager, self)._generateTask(task, checkTaskList) is False:
            return False
        pageSize = self._taskList.pop(0)
        task[EmRzrqCrawler.PARA_PAGESIZE] = pageSize
        task[CrawlerConstants.PARA_CLASS] = EmRzrqCrawler
        return True

class EmRzrqCrawler(CrawlerBase):
    """Crawls the East Money margin-trading (rzrq) daily summary.

    Pages through the JS.aspx data interface, collects per-date balance
    triples and writes them to ``rzrq.csv`` sorted by date ascending with
    a ``date,SH,SZ,TOTAL`` header row.
    """
    PARA_PAGESIZE = 'pageSize'
    # %d placeholders: page number, page size.  The endpoint answers with a
    # JS snippet ("var qsHZIRkC={pages:(pc),data:[(x)]}"), not pure JSON.
    JS_URL ='http://datainterface.eastmoney.com/EM_DataCenter/JS.aspx?type=FD&sty=SHSZHSSUM&st=0&sr=1&p=%d&ps=%d&js=var%%20qsHZIRkC={pages:(pc),data:[(x)]}&rt=48665290'
    SHEET_NAME = 'rzrq.csv'

    def __init__(self, controller, dbProxy, request):
        """
        :param controller: owning manager; supplies the logger and
            ``randomSleep`` throttling between page fetches.
        :param dbProxy: database proxy, forwarded to the base class.
        :param request: task dict; must contain ``PARA_PAGESIZE``.
        """
        super(EmRzrqCrawler, self).__init__(controller, dbProxy, request)
        self.__pageSize = request[EmRzrqCrawler.PARA_PAGESIZE]
        self.logger = controller.logger

    def run(self):
        """Execute the crawl, report completion status, and log."""
        super(EmRzrqCrawler, self).run()
        status = self.__parse()
        self._reportDone(status)
        self.logger.info('Finish Crawl')

    def __parse(self):
        """Fetch every result page and dump the collected rows to CSV.

        :returns: ``CrawlerConstants.VAL_STATUS_FINISH`` on completion.
        """
        pageNum = 1
        datadict = dict()
        while True:
            url = EmRzrqCrawler.JS_URL % (pageNum, self.__pageSize)
            content = self._fetchContent(url)
            # Cut the JS object literal out of the "var qsHZIRkC={...}"
            # wrapper, then quote the bare keys so json.loads accepts it.
            lindex = content.find('{')
            rindex = content.rfind('}')
            js = content[lindex:rindex+1]
            js = js.replace('pages','"pages"').replace('data','"data"')
            jo = json.loads(js)
            totalPage = jo['pages']
            data = jo['data']
            for item in data:
                # Each item is one comma-separated row: field 0 is the date;
                # fields 10-12 are taken as the SH, SZ and TOTAL balances
                # (assumed layout of the API row -- TODO confirm).
                fields = item.split(',')
                datadict[fields[0]] = (fields[10], fields[11], fields[12])

            if pageNum < totalPage:
                pageNum += 1
            else:
                break

            # Throttle between page requests to avoid hammering the server.
            self.controller.randomSleep()

        if len(datadict) > 0:
            sortedList = sorted(datadict.iteritems(), key=lambda x: x[0])
            # FIX: use a with-block so the file handle is closed even when a
            # write raises (the original leaked the handle on error paths).
            with open(EmRzrqCrawler.SHEET_NAME, 'w') as fn:
                fn.write('date,SH,SZ,TOTAL\n')
                for sdate, (shdata, szdata, totaldata) in sortedList:
                    fn.write('%s,%s,%s,%s\n' % (sdate, shdata, szdata, totaldata))

        return CrawlerConstants.VAL_STATUS_FINISH

if __name__ == '__main__':
    if PIDUtils.isPidFileExist('erq'):
        print 'Previous East Money Rzrq process is on-going, please stop it firstly'
        sys.exit(1)
    import os
    pid = os.getpid()
    PIDUtils.writePid('erq', pid)
    Logging.initLogger('conf/crawler/crawler.logging.cfg')
    erq = EmRzrqCrawlerManager('conf/crawler/erq.cfg')
    erq.start()
    pidutils = PIDUtils('erq', erq.shutDown, 5, erq.logger)
    pidutils.start()
    sys.exit(0)                