#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
__author__ : xyh
__date__: 2016-09-14
新三板预披露公告爬虫(源:中小企业股转系统),手动启动,
可以通过父文件夹中的config.cfg文件中的[input]下的pre_xsb_date_range设置公告发布的起始时间
"""

import os
import json
import sys
import re
from ConfigParser import ConfigParser
from scpy.logger import get_logger
from xtls.timeparser import parse_time

from base_crawler import StockCrawler


# Python 2 hack: reload(sys) restores setdefaultencoding() (removed by
# site.py at startup) so implicit str<->unicode conversions use UTF-8
# instead of ASCII. Must run before any text processing below.
reload(sys)
sys.setdefaultencoding("utf-8")
# Module-level logger from the project's scpy helper, named after this file.
logger = get_logger(__file__)


def get_post_headers():
    """Return the fixed HTTP headers used for POST requests to neeq.com.cn."""
    return {
        'X-Requested-With': 'XMLHttpRequest',
        'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
        'User-Agent': ('Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 '
                       '(KHTML, like Gecko) Chrome/47.0.2526.73 Safari/537.36'),
        'Host': 'www.neeq.com.cn',
    }


def str_json(html):
    """Extract and parse the JSON payload from a JSONP response body.

    The NEEQ endpoint wraps its JSON in a callback, e.g.
    ``jQuery183...({"listInfo": ...})``; this strips the callback wrapper
    and returns the parsed object.

    Raises IndexError when no parenthesized payload is present, and
    ValueError (via json.loads) when the payload is not valid JSON.
    """
    # Raw string avoids the invalid '\(' escape warning of the original.
    # The greedy '.+' spans from the first '(' to the last ')', so any
    # parentheses nested inside the JSON body are kept intact.
    json_str = re.findall(r'\((.+)\)', html)[0]
    json_data = json.loads(json_str)
    return json_data


def get_post_data(page, start_date='', end_date=''):
    """Build the form payload for one page of the NEEQ disclosure query.

    An empty start/end date falls back to "1 day ago" / "now" (date part
    only) via the project's parse_time helper.
    """
    start = start_date or parse_time(u'1天前').split(' ')[0]
    end = end_date or parse_time(u'1秒前').split(' ')[0]

    return {
        'disclosureType': 9,
        'page': page,
        'companyCd': u'公司名称/拼音/代码',
        'keyword': u'关键字',
        'startTime': start,
        'endTime': end,
    }



class PreXsbCrawler(StockCrawler):
    """Crawler for NEEQ (新三板) pre-disclosure announcements.

    Pages through the neeq.com.cn disclosure query endpoint, parses each
    announcement into the project's storage schema and persists it through
    the StockCrawler save() machinery.
    """

    def __init__(self, date_range=None, page=1):
        # Initial payload template handed to the base class; the actual
        # per-page payload is rebuilt by get_post_data() on every request.
        post_data = {
            'disclosureType': 9,
            'page': 0,
            'companyCd': u'公司名称/拼音/代码',
            'keyword': u'关键字',
            'startTime': '',
            'endTime': '',
        }

        super(PreXsbCrawler, self).__init__(post_data=post_data,
                                            date_range=date_range,
                                            page=page)
        self.db = 'stockProj'
        self.collection = 'preDisclosure'
        # NOTE(review): 'qurey_url' is a typo for 'query_url'; kept as-is
        # for backward compatibility with any external users of the attribute.
        self.qurey_url = 'http://www.neeq.com.cn/disclosureInfoController/infoResult.do?callback=jQuery183046117876726202667_1468808208911'
        self.name = 'pre_xsb'

    # ------------------- parse: overrides the base class -------------------
    def parse(self, item):
        """Map one raw announcement dict from the NEEQ API to the storage schema."""
        result = {
            # Bug fix: the default '' belongs inside .get(); the original
            # passed it as a spurious second argument to format(), producing
            # 'xsb-None' whenever 'disclosureCode' was missing.
            '_id': 'xsb-{}'.format(item.get('disclosureCode', '')),
            'stockCode': '',
            'companyShortName': None,
            'title': item.get('disclosureTitle'),
            'type': [u'新三板审查公开信息'],
            'announcementTime': item.get('publishDate', '') + ' 00:00:00',
            'announcementId': item.get('disclosureCode'),
            'filetype': item.get('fileExt', '').lower(),
            # Default '' prevents a TypeError when 'destFilePath' is absent.
            'downloadUrl': 'http://www.neeq.com.cn' + item.get('destFilePath', ''),
            'section': None,
            'orgId': None,
            'columnId': None,
            'associateAnnouncement': None,
            'stockMarket': 'xsb',
        }
        return result

    # ------------------- save: inherited from the base class ---------------
    # ------------------- run: overrides the base class ---------------------
    def run(self):
        """Fetch pages until an empty result set, saving every announcement."""
        try:
            while True:
                logger.info('now page : %s' % self.page)
                html = self.get_html_content()
                if not html:
                    logger.warn('Non Response %d-%s-%s' % (self.page, self.start_date or u'1天前', self.end_date or u'1秒前'))
                    self.page += 1
                    continue

                # Response shape: [{'listInfo': {'content': [announcements]}}]
                annos = str_json(html)[0].get('listInfo', dict()).get('content', list())
                if not annos:
                    # An empty page means we've walked past the last result.
                    return

                self.deal_every_page(annos)
                self.page += 1

        except KeyboardInterrupt:
            return
        except Exception as e:  # top-level boundary: log with traceback, stop
            logger.exception(e)

    def get_html_content(self):
        """POST the current page's query; retry up to 3 times, None on failure."""
        retry_max = 3
        for _ in xrange(retry_max):
            post_data = get_post_data(self.page, self.start_date, self.end_date)
            html = self.post(self.qurey_url, data=post_data, headers=get_post_headers())
            if html:
                return html

        return None

    def deal_every_page(self, annos):
        """Parse and persist every announcement on one result page."""
        for item in annos:
            anno = self.parse(item)
            rst = self.save(anno)
            if rst == 1:  # base-class convention: 1 signals a newly inserted record
                logger.info('onemore %s' % anno['_id'])

# -------------------------END: class PreXsbCrawler----------------------


def main():
    """Read pre_xsb_date_range from ../config.cfg and run the crawler once."""
    here = os.path.dirname(__file__)
    cfg_path = os.path.abspath(os.path.join(here, os.path.pardir, 'config.cfg'))

    parser = ConfigParser()
    with open(cfg_path, 'r') as fp:
        parser.readfp(fp)

    crawler = PreXsbCrawler(parser.get('input', 'pre_xsb_date_range'))
    crawler.run()

if __name__ == '__main__':
    main()


