# -*- coding:utf-8 -*-
'''
Created on Mar 14, 2015

@author: eyaomai
'''
import sys
sys.path.append(sys.argv[0][:sys.argv[0].rfind('com/stocklens')])

from com.stocklens.stock.common.utils import Logging,  PIDUtils 
from com.stocklens.stock.data.crawlercommon import CrawlerConstants
from com.stocklens.stock.data.mas.mascommon import MASConstant, MASCrawlerManager,MASCrawlerBase
from bs4 import BeautifulSoup
import traceback

class MOSTCrawlerManager(MASCrawlerManager):
    '''
    Crawler manager for the Ministry of Science and Technology (MOST) site.

    Registers three listing-page crawl tasks (important news, work reports,
    notices) and runs them through the MASCrawlerManager thread pool.
    '''

    LOGGER_NAME_CRAWL_MOST = 'most'
    # Lower bound on worker threads handed to the base manager.
    MINIMAL_THREAD_NUM = 4

    def __init__(self, json_config_file):
        '''
        Load the JSON config via the base manager and set up the MOST logger.
        '''
        super(MOSTCrawlerManager, self).__init__(json_config_file, MOSTCrawlerManager.MINIMAL_THREAD_NUM)
        self._masTitle = '科技部'
        self.logger = Logging.getLogger(MOSTCrawlerManager.LOGGER_NAME_CRAWL_MOST)

    def _initTask(self):
        # One (crawler class, listing URL, title prefix) triple per section.
        task_specs = [
            (ImportantNewsCrawler, 'http://www.most.gov.cn/yw/', '要闻'),
            (WorkAndNoticeCrawler, 'http://www.most.gov.cn/kjbgz/', '工作'),
            (WorkAndNoticeCrawler, 'http://www.most.gov.cn/tztg/', '通知'),
        ]
        for crawler_cls, url, prefix in task_specs:
            self._taskList.append({CrawlerConstants.PARA_CLASS: crawler_cls,
                                   MASConstant.PARA_URL: url,
                                   MASConstant.PARA_PREFIX: prefix})

class ImportantNewsCrawler(MASCrawlerBase):
    '''
    Crawls the MOST "important news" (要闻) listing page.

    Each news row sits in a ``<div name="TRS">``; the second ``<td>`` holds
    the anchor (title + relative href) and the cell text ends with a
    parenthesised release date.
    '''
    # Upper bound on listing items processed per crawl.
    MAX_ITEMS = 40

    def __init__(self, controller, dbProxy, request):
        super(ImportantNewsCrawler, self).__init__(controller,
                                                   dbProxy,
                                                   request,
                                                   ImportantNewsCrawler.MAX_ITEMS,
                                                   MOSTCrawlerManager.LOGGER_NAME_CRAWL_MOST)

    def _crawl(self):
        '''
        Fetch the listing page, extract (releaseDate, title, link) per row
        and pass each to _handleItem.

        Malformed rows are skipped with a warning instead of raising
        IndexError and aborting the whole batch via the broad except.
        '''
        super(ImportantNewsCrawler, self)._crawl()
        try:
            content = self._fetchContent(self._url, None, False)
            if content is None:
                return
            soup = BeautifulSoup(content)
            divs = soup.findAll('div', {'name': 'TRS'})
            if not divs:
                # Fixed typo in the message: "may have change" -> "may have changed".
                self.logger.warn('div not found. Format may have changed')
                return
            # Slicing already clamps to len(divs); no need for min().
            for div in divs[:ImportantNewsCrawler.MAX_ITEMS]:
                tds = div.findAll('td')
                if len(tds) < 2:
                    # Row layout changed; skip this row rather than abort the batch.
                    self.logger.warn('Unexpected row layout, skipping one item')
                    continue
                td = tds[1]
                ahrefs = td.findAll('a')
                if not ahrefs:
                    self.logger.warn('No anchor found in row, skipping one item')
                    continue
                title = ahrefs[0].text
                # hrefs on the listing page are relative to the listing URL.
                link = self._url + ahrefs[0]['href']
                fullText = td.text
                # Release date is the content of the last "(...)" in the cell text.
                lindex = fullText.rfind('(') + 1
                rindex = fullText.rfind(')')
                releaseDate = fullText[lindex:rindex]
                self._handleItem(releaseDate, title, link)
        except Exception:
            # Best-effort crawler: log the full traceback and carry on.
            traceInfo = traceback.format_exc()
            self.logger.warn('Fail to crawl:%s', traceInfo)
    
class WorkAndNoticeCrawler(MASCrawlerBase):
    '''
    Crawls the MOST "work reports" (科技部工作) and "notices" (通知通告)
    listing pages, which share one HTML layout: a left-aligned ``<td>``
    containing one ``<table>`` per item (the last table is excluded,
    presumably a footer/pagination block — TODO confirm against the page).
    '''
    # Upper bound on listing items, passed to the base crawler.
    MAX_ITEMS = 40

    def __init__(self, controller, dbProxy, request):
        super(WorkAndNoticeCrawler, self).__init__(controller,
                                                   dbProxy,
                                                   request,
                                                   WorkAndNoticeCrawler.MAX_ITEMS,
                                                   MOSTCrawlerManager.LOGGER_NAME_CRAWL_MOST)

    def _crawl(self):
        '''
        Fetch the listing page, extract (releaseDate, title, link) per item
        table and pass each to _handleItem.

        Adds the layout-change warning that ImportantNewsCrawler already has
        (previously a missing <td> raised IndexError with no diagnostic),
        and skips malformed rows instead of aborting the whole batch.
        '''
        super(WorkAndNoticeCrawler, self)._crawl()
        try:
            content = self._fetchContent(self._url, None, False)
            if content is None:
                return
            soup = BeautifulSoup(content)
            tds = soup.findAll('td', {'align': 'left'})
            if not tds:
                # Consistent with ImportantNewsCrawler's format-change warning.
                self.logger.warn('td not found. Format may have changed')
                return
            tables = tds[0].findAll('table')
            for table in tables[:-1]:
                tds = table.findAll('td')
                if len(tds) < 2:
                    # Row layout changed; skip this item rather than abort the batch.
                    self.logger.warn('Unexpected row layout, skipping one item')
                    continue
                td = tds[1]
                ahrefs = td.findAll('a')
                if not ahrefs:
                    self.logger.warn('No anchor found in row, skipping one item')
                    continue
                title = ahrefs[0].text
                # hrefs on the listing page are relative to the listing URL.
                link = self._url + ahrefs[0]['href']
                fullText = td.text
                # Release date is the content of the last "(...)" in the cell text.
                lindex = fullText.rfind('(') + 1
                rindex = fullText.rfind(')')
                releaseDate = fullText[lindex:rindex]
                self._handleItem(releaseDate, title, link)
        except Exception:
            # Best-effort crawler: log the full traceback and carry on.
            traceInfo = traceback.format_exc()
            self.logger.warn('Fail to crawl:%s', traceInfo)

if __name__ == '__main__':
    if PIDUtils.isPidFileExist('most'):
        print 'Previous MOST Crawler process is on-going, please stop it firstly'
        sys.exit(1)
    import os
    pid = os.getpid()
    PIDUtils.writePid('most', pid)
    Logging.initLogger('conf/crawler/crawler.logging.cfg')
    most = MOSTCrawlerManager('conf/crawler/most.cfg')
    most.start()
    pidutils = PIDUtils('most', most.shutDown, 5, most.logger)
    pidutils.start()
    sys.exit(0)
