#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
A股市场补一些公告分类的爬虫
"""

import datetime
import json
import sys
import time
# try:
#     from cStringIO import StringIO
# except ImportError:
#     from StringIO import StringIO

from pymongo import MongoClient
from scpy.logger import get_logger
from scpy.qiniu import Qiniu
from scpy.xawesome_crawler import BaseCrawler
from xtls.timeparser import now, parse_time
from xtls.codehelper import forever
# from util import convert

# Python-2-only hack: re-expose setdefaultencoding so byte strings and
# unicode mix without explicit decoding throughout this module.
reload(sys)
sys.setdefaultencoding("utf-8")
logger = get_logger(__file__)

# MongoDB instance and the collection announcements are written into.
MONGO = MongoClient('10.132.23.104', 27017)
DB_NAME, COLL_NAME = 'stockProj', 'shenhuStockAnno'
# Qiniu object-storage client; used below to archive downloaded PDF files.
qiniu = Qiniu(bucket='sc-crawler', host='10.132.23.104')

# cninfo.com.cn announcement search endpoint, queried via form-encoded POST.
QUERY_URL = 'http://www.cninfo.com.cn/cninfo-new/announcement/query'
POST_HEADERS = {
    'X-Requested-With': 'XMLHttpRequest',
    'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
}


def parse_time_stamp(time_stamp):
    """Render a unix timestamp (seconds) as 'YYYY-MM-DD HH:MM:SS' in local time."""
    return datetime.datetime.fromtimestamp(time_stamp).strftime('%Y-%m-%d %H:%M:%S')


class StockCrawler(BaseCrawler):
    """Crawler for historical A-share announcements on cninfo.com.cn.

    Pages through the szse historical-announcement search endpoint,
    normalizes each record, stores it in MongoDB and (best effort)
    archives PDF attachments to Qiniu.
    """

    # Announcements requested per page; a shorter page marks the last one.
    PAGE_SIZE = 50

    def __init__(self, page=1):
        """:param page: 1-based page number to start crawling from."""
        super(StockCrawler, self).__init__(page=page)
        # Form payload for the query endpoint; 'pageNum' is filled in
        # per request inside run().
        self.post_data = {
            'stock': '',
            'searchkey': '',
            'plate': '',
            'category': '',
            'trade': '',
            'column': 'szse',
            'columnTitle': u'历史公告查询',
            'pageNum': 0,
            'pageSize': self.PAGE_SIZE,
            'tabName': 'fulltext',
            'sortName': '',
            'sortType': '',
            'limit': '',
            'showTitle': '',
            'seDate': self.build_date(),
        }

    @classmethod
    def build_date(cls):
        """Return the query range 'YYYY-MM-DD ~ YYYY-MM-DD' covering
        yesterday through today (times stripped, dates only)."""
        start = parse_time(u'1天前').split(' ')[0]
        end = parse_time(u'1秒前').split(' ')[0]
        return start + ' ~ ' + end

    def parse(self, item):
        """Normalize one raw announcement dict from the API response.

        :param item: one element of the 'announcements' array returned by
                     the query endpoint.
        :return: dict shaped for the Mongo collection, '_id' keyed on the
                 announcement id so duplicates are detected in save().
        """
        return {
            '_id': item['announcementId'],
            'stockCode': item['secCode'],
            'companyShortName': item['secName'],
            'title': item['announcementTitle'],
            'type': [],
            # The API reports milliseconds; convert to seconds first.
            'announcementTime': parse_time_stamp(item['announcementTime'] / 1000),
            'announcementId': item['announcementId'],
            'filetype': str(item['adjunctType']).lower(),
            'downloadUrl': 'http://www.cninfo.com.cn/' + item['adjunctUrl'],
            'section': item['pageColumn'],  # board, e.g. SME board / ChiNext
            'orgId': item['orgId'],
            'columnId': item['columnId'],
            'associateAnnouncement': item['associateAnnouncement'],
        }

    def save(self, data):
        """Insert a parsed announcement unless it is already stored.

        PDF attachments are downloaded and uploaded to Qiniu; a failed
        upload is logged but does not prevent the Mongo insert.

        :param data: dict produced by parse().
        :return: 0 when a new record was inserted, -1 when the id exists.
        """
        logger.info('save item %s', data['_id'])
        data['updateTime'] = now()
        # Guard clause: skip anything we have already stored.
        if MONGO[DB_NAME][COLL_NAME].find_one({'_id': data['_id']}):
            return -1
        try:
            if data['downloadUrl'].lower().endswith('pdf'):
                content = self.get(data['downloadUrl'])
                uploaded = qiniu.upload(content, suffix='pdf')
                data['fileKey'] = uploaded['_id']
        except Exception as e:  # best effort: keep the record even if the upload fails
            logger.exception(e)
        MONGO[DB_NAME][COLL_NAME].insert_one(data)
        return 0

    def run(self):
        """Crawl page after page until the source is exhausted or the
        hard cap of 1000 pages is reached."""
        while True:
            logger.info('now page : %s', self.page)
            self.post_data['pageNum'] = self.page
            data = self.post(QUERY_URL, data=self.post_data, headers=POST_HEADERS)
            annos = json.loads(data)['announcements']
            if not annos:
                break
            for item in annos:
                self.save(self.parse(item))
            # Safety cap so a misbehaving endpoint cannot loop forever.
            if self.page == 1000:
                break
            # A short page means we just consumed the last one.
            if len(annos) != self.PAGE_SIZE:
                return
            self.page += 1


def main():
    """Entry point: run one full crawl pass over the announcement listing."""
    crawler = StockCrawler()
    crawler.run()


# Allow the crawler to be launched directly as a script.
if __name__ == '__main__':
    main()

