#!/usr/bin/env python
# -*- coding:utf-8 -*-
import datetime
import json
import sys
import time

try:
    from cStringIO import StringIO
except ImportError:
    from StringIO import StringIO
from pymongo import MongoClient
from scpy.logger import get_logger
from scpy.xawesome_crawler import BaseCrawler
from xtls.timeparser import now
from scpy.qiniu import Qiniu

# Python 2 idiom: force UTF-8 as the default str<->unicode codec so the
# Chinese form values and responses below don't raise UnicodeDecodeError.
reload(sys)
sys.setdefaultencoding("utf-8")
logger = get_logger(__file__)
# Object store for downloaded announcement attachments.
qiniu = Qiniu(bucket='sc-crawler', host='10.132.23.104')
# NOTE(review): connection details are hard-coded; presumably an internal
# host — confirm before reuse outside that network.
MONGO = MongoClient('10.132.23.104', 27017)
# Target database / collection for crawled announcement metadata.
DB_NAME, COLL_NAME = 'stockProj', 'preDisclosure'

# cninfo announcement-query endpoint; expects a form-encoded AJAX POST.
QUERY_URL = 'http://www.cninfo.com.cn/cninfo-new/announcement/query'
POST_HEADERS = {
    'X-Requested-With': 'XMLHttpRequest',
    'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
}


def parse_time_stamp(time_stamp):
    """Format a Unix timestamp (seconds) as 'YYYY-MM-DD HH:MM:SS' in local time."""
    local_struct = time.localtime(time_stamp)
    return time.strftime('%Y-%m-%d %H:%M:%S', local_struct)


class StockCrawler(BaseCrawler):
    """Crawls pre-disclosure announcements from cninfo.com.cn.

    For each announcement it downloads the attachment, uploads it to Qiniu
    object storage, and records the metadata in MongoDB
    (``MONGO[DB_NAME][COLL_NAME]``), skipping ids already stored.
    """

    def __init__(self, page=1):
        """Prepare the POST form used by the cninfo query endpoint.

        :param page: 1-based page number to start crawling from.
        """
        super(StockCrawler, self).__init__(page=page)
        self.post_data = {
            'stock': '',
            'searchkey': '',
            'plate': '',
            'category': '',
            'trade': '',
            'column': 'pre_disclosure',
            'columnTitle': u'历史公告查询',  # "historical announcement query"
            'pageNum': 0,  # overwritten with self.page before every request in run()
            'pageSize': 50,
            'tabName': 'fulltext',
            'sortName': '',
            'sortType': '',
            'limit': '',
            'showTitle': '',
            'seDate': self.build_date(),
        }

    @classmethod
    def build_date(cls, p=None):
        """Build the ``seDate`` range string expected by the query endpoint.

        Without an argument the range ends today.  Given a previous date
        string (first space-separated token is ``YYYY-MM-DD``), the range
        ends three days after it — presumably a re-crawl overlap window;
        TODO confirm with the caller.

        :param p: optional date string such as '2016-01-01 12:00:00'.
        :return: a string of the form '1949-10-01 ~ YYYY-MM-DD'.
        """
        if not p:
            return '1949-10-01 ~ ' + datetime.datetime.now().strftime('%Y-%m-%d')
        day = p.split(' ')[0]
        end = (datetime.datetime.strptime(day, '%Y-%m-%d')
               + datetime.timedelta(days=3)).strftime('%Y-%m-%d')
        return '1949-10-01 ~ ' + end

    def parse(self, item):
        """Normalize one raw announcement dict from the API into our schema.

        :param item: one element of the response's 'announcements' list.
        :return: dict ready for MongoDB, with '_id' set to the announcement id.
        """
        return {
            '_id': item['announcementId'],
            'stockCode': item['secCode'],
            'companyShortName': item['secName'],
            'title': item['announcementTitle'],
            'type': [item['announcementType']],
            # API timestamps are in milliseconds; convert to seconds.
            'announcementTime': parse_time_stamp(item['announcementTime'] / 1000),
            'announcementId': item['announcementId'],
            'filetype': str(item['adjunctType']).lower(),
            'downloadUrl': 'http://www.cninfo.com.cn/' + item['adjunctUrl'],
            'section': item['pageColumn'],
            'orgId': item['orgId'],
            'columnId': item['columnId'],
            'associateAnnouncement': item['associateAnnouncement'],
        }

    def save(self, data):
        """Download the attachment, upload it to Qiniu and insert the record.

        :param data: parsed announcement dict (see :meth:`parse`).
        :return: True if the record already existed (nothing done),
                 False if a new record was downloaded and inserted.
        """
        data['updateTime'] = now()

        # Use the shared module constants instead of re-hardcoding the
        # database/collection names.
        coll = MONGO[DB_NAME][COLL_NAME]
        if coll.find_one({'_id': data['_id']}):
            return True

        logger.info('downloading %s' % data['downloadUrl'])
        content = self.get(data['downloadUrl'])
        rst = qiniu.upload(content, data['filetype'])
        data['fileKey'] = rst['key']

        coll.insert_one(data)
        return False

    def run(self):
        """Page through the query endpoint until an empty or short page."""
        while True:
            logger.info('now page : %s' % self.page)
            self.post_data['pageNum'] = self.page
            data = self.post(QUERY_URL, data=self.post_data, headers=POST_HEADERS)
            annos = json.loads(data)['announcements']
            if not annos:
                break
            for item in annos:
                self.save(self.parse(item))
            # A page shorter than pageSize (50) means this was the last one.
            if len(annos) != 50:
                return
            self.page += 1


def main():
    """Entry point: crawl all pre-disclosure pages starting from page 1."""
    crawler = StockCrawler()
    crawler.run()


if __name__ == '__main__':
    main()
