# -*- coding:utf-8 -*-
'''
Created on Feb 7, 2015

@author: eyaomai
'''
import sys
# Make the project root importable when this file is run as a script:
# strip the 'com/stocklens' package suffix from the invoked script path and
# put the remainder on sys.path.
# NOTE(review): if 'com/stocklens' is absent from argv[0], rfind returns -1
# and this appends argv[0][:-1] instead — assumes the on-disk layout, confirm.
sys.path.append(sys.argv[0][:sys.argv[0].rfind('com/stocklens')])

from com.stocklens.stock.data.mas.mascommon import MASCrawlerManager, MASConstant, MASCrawlerBase
from com.stocklens.stock.common.utils import Logging,  PIDUtils 
from com.stocklens.stock.data.crawlercommon import CrawlerConstants
from bs4 import BeautifulSoup
import traceback
import datetime
import json

class MIITManager(MASCrawlerManager):
    '''
    Crawler manager for the MIIT (Ministry of Industry and Information
    Technology, 工信部) site.  Its job is to register the crawler tasks
    that the base MASCrawlerManager will schedule onto worker threads.
    '''

    LOGGER_NAME_CRAWL_MIIT = 'miit'
    MINIMAL_THREAD_NUM = 4

    def __init__(self, json_config_file):
        '''
        Initialise the base manager with the JSON config file and the
        minimal thread count, then set the source title and the logger.
        '''
        super(MIITManager, self).__init__(json_config_file,
                                          MIITManager.MINIMAL_THREAD_NUM)
        self._masTitle = '工信部'
        self.logger = Logging.getLogger(MIITManager.LOGGER_NAME_CRAWL_MIIT)

    def _initTask(self):
        '''
        Build the task list. Only the "files released" search endpoint is
        active; the listing-page tasks below are kept for when the site's
        anti-crawl measure is resolved.

        #uncomment until the anti-crawl is resolved
        request = {CrawlerConstants.PARA_CLASS:ImportantNewsCrawler,
                   MASConstant.PARA_URL:'http://www.miit.gov.cn/n11293472/n11293832/n13095885/index.html',
                   MASConstant.PARA_PREFIX:'要闻'}
        self._taskList.append(request)

        request = {CrawlerConstants.PARA_CLASS:ImportantNewsCrawler,
                   MASConstant.PARA_URL:'http://www.miit.gov.cn/n11293472/n11293832/n11293907/n11368223/index.html',
                   MASConstant.PARA_PREFIX:'工作动态'}
        self._taskList.append(request)

        request = {CrawlerConstants.PARA_CLASS:ImportantNewsCrawler,
                   MASConstant.PARA_URL:'http://www.miit.gov.cn/n11293472/n11293832/n11294042/n11481465/index.html',
                   MASConstant.PARA_PREFIX:'政策解读'}
        self._taskList.append(request)
        '''
        task = {
            CrawlerConstants.PARA_CLASS: FilesCrawler,
            MASConstant.PARA_URL: 'http://search.miit.gov.cn/search/search',
            MASConstant.PARA_PREFIX: '文件发布',
        }
        self._taskList.append(task)

class FilesCrawler(MASCrawlerBase):
    '''
    Crawls the MIIT document-search endpoint for documents released in the
    current calendar year and feeds each result to _handleItem.
    '''

    # POST body template for the search endpoint.  %% escapes yield literal
    # '%' characters (URL-encoded path) once the string is %-formatted with
    # (upperLimit, year, lowerLimit).
    DATA = 'url=http%%3A%%2F%%2Fwww.miit.gov.cn%%2Fn11293472%%2Fn11293832%%2Fn12843926%%2F&fullText=http&sortKey=showTime&upperLimit=%s&sortFlag=-1&sortType=1&num=20&year=%d&lowerLimit=%s&pageSize=10&pageNow=1'
    MAX_ITEMS = 40

    def __init__(self, controller, dbProxy, request):
        super(FilesCrawler, self).__init__(
            controller, dbProxy, request,
            FilesCrawler.MAX_ITEMS,
            MIITManager.LOGGER_NAME_CRAWL_MIIT)

    def _crawl(self):
        '''Query the search API for this year's documents and record each one.'''
        super(FilesCrawler, self)._crawl()
        try:
            today = datetime.datetime.now()
            # Window: from Jan 1 of the current year up to today.
            lower_limit = today.strftime('%Y') + '-01-01'
            upper_limit = today.strftime('%Y-%m-%d')
            payload = FilesCrawler.DATA % (upper_limit, today.year, lower_limit)
            content = self._fetchContent(self._url, payload, False)
            if content is None:
                return
            # Response is JSON; each entry carries name/url/showTime fields.
            for entry in json.loads(content)['array']:
                self._handleItem(entry['showTime'], entry['name'], entry['url'])
        except Exception:
            # Boundary catch: a malformed response must not kill the worker.
            self.logger.warn('Fail to crawl:%s', traceback.format_exc())

class ImportantNewsCrawler(MASCrawlerBase):
    '''
    Scrapes MIIT HTML listing pages (news / work updates / policy reads).
    Currently not scheduled by MIITManager because of the site's
    anti-crawl measure — see MIITManager._initTask.
    '''

    MAX_ITEMS = 40

    def __init__(self, controller, dbProxy, request):
        super(ImportantNewsCrawler, self).__init__(
            controller, dbProxy, request,
            ImportantNewsCrawler.MAX_ITEMS,
            MIITManager.LOGGER_NAME_CRAWL_MIIT)

    def _crawl(self):
        '''Fetch the listing page and record one item per result table.'''
        super(ImportantNewsCrawler, self)._crawl()
        try:
            content = self._fetchContent(self._url, None, False)
            if content is None:
                return
            page = BeautifulSoup(content)
            rows = page.findAll('table', {'class': 'black14_24'})
            if not rows:
                self.logger.warn('Format is changed or is anti-crawled')
                return
            for row in rows:
                anchor = row.findAll('a')[0]
                cells = row.findAll('td')
                # NOTE(review): href is concatenated onto the full listing
                # URL (which itself ends in index.html); urljoin semantics
                # may be what was intended — confirm against real markup.
                link = self._url + anchor['href']
                self._handleItem(cells[1].text, anchor['title'], link)
        except Exception:
            # Boundary catch: a malformed page must not kill the worker.
            self.logger.warn('Fail to crawl:%s', traceback.format_exc())
    
                    
if __name__ == '__main__':
    # Refuse to start when a previous crawler instance left a live pid file.
    if PIDUtils.isPidFileExist('miit'):
        # Parenthesized single-argument print is valid in both Python 2 and
        # Python 3 (the original bare print statement is Python-2-only).
        print('Previous MIIT Crawler process is on-going, please stop it firstly')
        sys.exit(1)
    import os
    # Record our pid so a concurrent launch (above) can detect us.
    PIDUtils.writePid('miit', os.getpid())
    Logging.initLogger('conf/crawler/crawler.logging.cfg')
    miit = MIITManager('conf/crawler/miit.cfg')
    miit.start()
    # Watchdog that monitors the pid file and triggers miit.shutDown,
    # polling every 5 seconds.
    pidutils = PIDUtils('miit', miit.shutDown, 5, miit.logger)
    pidutils.start()
    sys.exit(0)