#!/usr/bin/env python
# -*- coding:utf-8 -*-
import datetime
import json
import sys
import time

from pymongo import MongoClient
from scpy.logger import get_logger
from scpy.qiniu import Qiniu
from scpy.xawesome_crawler import BaseCrawler
from xtls.timeparser import now

# Python 2 only: force the process default string encoding to UTF-8 so
# the Chinese announcement titles can be handled without explicit decoding.
reload(sys)
sys.setdefaultencoding("utf-8")
logger = get_logger(__file__)

# MongoDB connection (localhost by default; alternate host kept for reference).
MONGO = MongoClient('127.0.0.1', 27017)
# MONGO = MongoClient('10.132.23.104', 27017)
# Target database / collection names for crawled announcements.
DB_NAME, COLL_NAME = 'stockProj', 'shenhuStockAnno'

# cninfo announcement query endpoint and the form-POST headers it expects.
QUERY_URL = 'http://www.cninfo.com.cn/cninfo-new/announcement/query'
POST_HEADERS = {
    'X-Requested-With': 'XMLHttpRequest',
    'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
}


def parse_time_stamp(time_stamp):
    return time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time_stamp))


class StockCrawler(BaseCrawler):
    """Crawler for cninfo historical stock announcements.

    Pages through the cninfo query API 50 records at a time, parses each
    announcement into a flat dict and stores it in MongoDB, de-duplicating
    on the announcement id.  The site caps paging at 1000 pages per query,
    so ``run`` restarts with an advanced date window whenever the cap is hit.
    """

    def __init__(self, category, page=1):
        super(StockCrawler, self).__init__(page=page, category=category, total=0)
        # Form payload for the POST query; 'pageNum' and 'seDate' are
        # updated per request / per date window.
        self.post_data = {
            'stock': '',
            'searchkey': '',
            'plate': '',
            'category': '',
            'trade': '',
            'column': 'szse',
            'columnTitle': u'历史公告查询',
            'pageNum': 0,
            'pageSize': 50,
            'tabName': 'fulltext',
            'sortName': '',
            'sortType': '',
            'limit': '',
            'showTitle': '',
            'seDate': self.build_date(),
        }

    @classmethod
    def build_date(cls, p=None):
        """Build the 'seDate' range string '1949-10-01 ~ <end>'.

        With no argument the range ends today; given a previous timestamp
        string ('YYYY-mm-dd ...'), the range ends one day after its date so
        the next crawl window resumes just past the last seen announcement.
        """
        if not p:
            return '1949-10-01 ~ ' + datetime.datetime.now().strftime('%Y-%m-%d')
        day = p.split(' ')[0]
        next_day = datetime.datetime.strptime(day, '%Y-%m-%d') + datetime.timedelta(days=1)
        return '1949-10-01 ~ ' + next_day.strftime('%Y-%m-%d')

    def parse(self, item):
        """Map one raw API announcement record to the storage schema."""
        result = {
            '_id': item['announcementId'],
            'stockCode': item['secCode'],
            'companyShortName': item['secName'],
            'title': item['announcementTitle'],
            'type': [],
            # API timestamp is in milliseconds; convert to seconds.
            'announcementTime': parse_time_stamp(item['announcementTime'] // 1000),
            'announcementId': item['announcementId'],
            'filetype': str(item['adjunctType']).lower(),
            'downloadUrl': 'http://www.cninfo.com.cn/' + item['adjunctUrl'],
            'section': item['pageColumn'],  # board section (SME board, ChiNext, ...)
            'orgId': item['orgId'],
            'columnId': item['columnId'],
            'associateAnnouncement': item['associateAnnouncement'],
        }
        return result

    def save(self, data):
        """Insert ``data`` if its _id is new.

        Returns 0 on insert, -1 when the announcement already exists.
        """
        data['updateTime'] = now()
        # Use the module-level constants instead of repeating literal names.
        coll = MONGO[DB_NAME][COLL_NAME]
        if coll.find_one({'_id': data['_id']}) is None:
            coll.insert_one(data)
            return 0
        return -1

    def _run(self):
        """Crawl all pages for the current 'seDate' window.

        Returns the last parsed announcement when the site's 1000-page cap
        is reached (so ``run`` can restart with a later window), or None
        once the final, short page has been consumed.
        """
        while True:
            self.total += 1
            logger.info('now page : %s-%s' % (self.page, self.total))
            self.post_data['pageNum'] = self.page
            data = self.post(QUERY_URL, data=self.post_data, headers=POST_HEADERS)
            annos = json.loads(data)['announcements']
            if not annos:
                return
            anno = None
            for item in annos:
                anno = self.parse(item)
                # A -1 result (duplicate) is expected and deliberately ignored.
                self.save(anno)
            if self.page == 1000:
                # cninfo refuses to page past 1000; hand back the last
                # announcement so the caller can restart from its date.
                logger.info('last %s' % anno)
                return anno
            if len(annos) != 50:
                # Short page means this was the last one for the window.
                return
            self.page += 1

    def run(self):
        """Drive ``_run`` repeatedly, advancing the date window past each 1000-page cap."""
        while True:
            rst = self._run()
            if not rst:
                return
            self.page = 1
            self.post_data['seDate'] = self.build_date(rst['announcementTime'])
            logger.info('process %s' % self.post_data['seDate'])


def main():
    """Entry point: crawl every category starting from page 1."""
    crawler = StockCrawler('')
    crawler.run()


# Run the crawler when executed as a script.
if __name__ == '__main__':
    main()
