#!/usr/bin/env python
# -*- coding:utf-8 -*-

"""
__author__ : xyh
__date__: 2016-09-14
新三板公告爬虫(源:巨潮网),手动启动,
可以通过父文件夹中的config.cfg文件中的[input]下的xsb_date_range设置公告发布的起始时间
"""

import json
import sys
import os
import time
from ConfigParser import ConfigParser

from scpy.logger import get_logger

from base_crawler import StockCrawler

reload(sys)
sys.setdefaultencoding("utf-8")
logger = get_logger(__file__)

DB_NAME, COLL_NAME = 'stockProj', 'xinsanban'

QUERY_URL = 'http://www.cninfo.com.cn/cninfo-new/announcement/query'

CATEGORY = {
    "category_cxpl_gfzr": u"持续信息披露",
    "category_dqgg_gfzr": u"定期公告",
    "category_lsgg_gfzr": u"临时公告",
    "category_scpl_gfzr": u"首次信息披露",
    "category_zjjg_gfzr": u"中介机构公告",

    "category_cxpl_lwts": u"持续信息披露",
    "category_dqgg_lwts": u"定期公告",
    "category_lsgg_lwts": u"临时公告",
    "category_scpl_lwts": u"首次信息披露",
    "category_zjjg_lwts": u"中介机构公告",
}

class XsbCrawler(StockCrawler):
    """Crawler for NEEQ ("New Third Board") announcements from cninfo.

    One instance crawls a single announcement category (a key of the
    module-level ``CATEGORY`` map).  The category suffix selects the
    board that is queried:

    * ``*_gfzr`` -- companies listed on the share transfer system (NEEQ)
    * ``*_lwts`` -- "two-net" companies and delisted companies
    """

    def __init__(self, category, date_range=None, page=1):
        """Build the POST payload for *category* and init the base crawler.

        :param category: a key of ``CATEGORY``, e.g. ``'category_dqgg_gfzr'``
        :param date_range: optional announcement date range, passed through
            to ``StockCrawler``
        :param page: first page to crawl, passed through to ``StockCrawler``
        """
        # The '..._gfzr' suffix marks share-transfer-system (NEEQ) listed
        # companies; everything else is the two-net/delisted board.
        is_gfzr = category.endswith('gfzr')
        self.source_type = (u'两网公司及退市公司', u'股份转让系统挂牌公司')[is_gfzr]
        self.column = (u'staq_net_delisted', u'neeq_company')[is_gfzr]
        post_data = {
            'stock': '',
            'searchkey': '',
            'plate': '',
            'category': category,  # already a str; no '%s' % needed
            'trade': '',
            'column': self.column,
            'columnTitle': u'历史公告查询',
            'pageNum': 0,
            'pageSize': 50,
            'tabName': 'fulltext',
            'sortName': '',
            'sortType': '',
            'limit': '',
            'showTitle': '',  # e.g. u'category_cxpl_gfzr/category/持续信息披露'
            'seDate': '',
        }
        super(XsbCrawler, self).__init__(post_data=post_data,
                                         date_range=date_range,
                                         page=page)
        # Reuse the module-level constants instead of duplicating the
        # literals, so DB / collection / URL stay defined in one place.
        self.db = DB_NAME
        self.collection = COLL_NAME
        # NOTE(review): attribute name 'qurey_url' (sic) presumably matches
        # what StockCrawler reads -- do not "fix" the typo without checking
        # the base class.
        self.qurey_url = QUERY_URL
        self.name = 'xsb'

# ------------------- parse: overrides the base class -------------------
    def parse(self, item):
        """Map one raw cninfo announcement record into our storage schema.

        :param item: one announcement dict as returned by the cninfo
            query endpoint
        :return: dict ready to be saved; ``_id`` is the announcement id,
            so re-crawled announcements overwrite rather than duplicate
        """
        result = {
            '_id': item['announcementId'],
            'stockCode': item['secCode'],
            'companyShortName': item['secName'],
            'title': item['announcementTitle'],
            'type': [CATEGORY[self.post_data['category']]],
            # announcementTime is a millisecond epoch timestamp.
            'announcementTime': self.parse_time_stamp(item['announcementTime'] / 1000),
            'announcementId': item['announcementId'],
            'filetype': str(item['adjunctType']).lower(),
            'downloadUrl': 'http://www.cninfo.com.cn/' + item['adjunctUrl'],
            'section': item['pageColumn'],
            'orgId': item['orgId'],
            'columnId': item['columnId'],
            'associateAnnouncement': item['associateAnnouncement'],
            'sourceType': self.source_type,
        }
        return result

    def parse_time_stamp(self, time_stamp):
        """Format a second-resolution epoch timestamp as local wall-clock time."""
        return time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time_stamp))

# ------------------- save: inherited from the base class ----------------
# ------------------- run: inherited from the base class -----------------
# ------------------- END: class XsbCrawler ------------------------------


def main():
    """Read the date range from ../config.cfg and crawl every category."""
    config = ConfigParser()
    config_path = os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir, 'config.cfg'))
    # Open the file ourselves (instead of config.read) so a missing
    # config.cfg fails loudly rather than being silently ignored.
    with open(config_path, 'r') as cfgfile:
        config.readfp(cfgfile)

    date_range = config.get('input', 'xsb_date_range')
    for category, label in CATEGORY.items():
        # Lazy %-args: the logging framework formats only if the record
        # is actually emitted.
        logger.info('crawling %s-%s', category, label)
        XsbCrawler(category, date_range).run()


if __name__ == '__main__':
    main()
