#!/usr/bin/env python
# -*- coding:utf-8 -*-
import datetime
import json
import sys
import time

from pymongo import MongoClient
from scpy.logger import get_logger
from scpy.xawesome_crawler import BaseCrawler
from xtls.timeparser import now
from xtls.codehelper import forever

# Python 2 only: force UTF-8 as the process-wide default codec so the Chinese
# category labels and crawled text don't raise UnicodeDecodeError.
reload(sys)
sys.setdefaultencoding("utf-8")
logger = get_logger(__file__)

# Module-level MongoDB connection (opened at import time) and target DB/collection.
MONGO = MongoClient('127.0.0.1', 27017)
DB_NAME, COLL_NAME = 'stockProj', 'xinsanban'

# cninfo announcement-search endpoint and the headers its backend expects
# (form-encoded AJAX POST).
QUERY_URL = 'http://www.cninfo.com.cn/cninfo-new/announcement/query'
POST_HEADERS = {
    'X-Requested-With': 'XMLHttpRequest',
    'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
}
# Announcement categories: site query key -> human-readable Chinese label.
CATEGORY = {
    "category_cxpl_lwts": u"持续信息披露",  # continuous disclosure
    "category_dqgg_lwts": u"定期公告",      # periodic announcements
    "category_lsgg_lwts": u"临时公告",      # ad-hoc announcements
    "category_scpl_lwts": u"首次信息披露",  # initial disclosure
    "category_zjjg_lwts": u"中介机构公告",  # intermediary-agency announcements
}


def parse_time_stamp(time_stamp):
    """Render a unix timestamp (seconds, local time) as 'YYYY-MM-DD HH:MM:SS'."""
    moment = datetime.datetime.fromtimestamp(time_stamp)
    return moment.strftime('%Y-%m-%d %H:%M:%S')


class StockCrawler(BaseCrawler):
    """Crawler for cninfo NEEQ ("xinsanban") announcement listings.

    One instance covers a single announcement category: it POSTs to the
    cninfo search endpoint page by page (50 rows per page), flattens each
    announcement into a dict keyed by ``announcementId``, and hands it to
    ``save``.  The MongoDB upsert in ``save`` is currently commented out
    and replaced by a debug stub that prints the record and returns -1,
    which stops the crawl after the first announcement.
    """

    def __init__(self, category, page=1):
        # NOTE(review): BaseCrawler appears to store the kwargs as attributes;
        # self.page and self.category are read below -- confirm in BaseCrawler.
        super(StockCrawler, self).__init__(page=page, category=category)
        # Form body for QUERY_URL.  'pageNum' is overwritten per request in
        # _run(); 'seDate' is re-anchored in run() after a 1000-page batch.
        self.post_data = {
            'stock': '',
            'searchkey': '',
            'plate': '',
            'category': '%s;' % self.category,  # endpoint expects a trailing ';'
            'trade': '',
            'column': 'neeq_company',
            'columnTitle': u'历史公告查询',
            'pageNum': 0,
            'pageSize': 50,
            'tabName': 'fulltext',
            'sortName': '',
            'sortType': '',
            'limit': '',
            'showTitle': '',
            'seDate': self.build_date(),  # full history up to today
        }

    @classmethod
    def build_date(cls, p=None):
        """Build the 'seDate' query range string '1949-10-01 ~ YYYY-MM-DD'.

        With no argument, the range ends today.  Given an announcementTime
        string ('YYYY-MM-DD HH:MM:SS'), the range ends three days after its
        date part -- presumably a small overlap buffer so no announcement is
        skipped between batches (TODO confirm intent of +3 days).
        """
        if not p:
            return '1949-10-01 ~ ' + datetime.datetime.now().strftime('%Y-%m-%d')
        p = p.split(' ')[0]
        p = (datetime.datetime.strptime(p, '%Y-%m-%d') + datetime.timedelta(days=3)).strftime('%Y-%m-%d')
        return '1949-10-01 ~ ' + p

    def parse(self, item):
        """Flatten one raw announcement dict from the API into our record.

        Uses announcementId as the Mongo _id.  announcementTime arrives in
        milliseconds and is rendered as a local-time string.
        """
        result = {
            '_id': item['announcementId'],
            'stockCode': item['secCode'],
            'companyShortName': item['secName'],
            'title': item['announcementTitle'],
            'type': CATEGORY[self.category],  # human-readable category label
            'announcementTime': parse_time_stamp(item['announcementTime'] / 1000),  # ms -> s (Py2 int division)
            'announcementId': item['announcementId'],
            'filetype': str(item['adjunctType']).lower(),
            'downloadUrl': 'http://www.cninfo.com.cn/' + item['adjunctUrl'],
            'section': item['pageColumn'],  # board, e.g. SME board / ChiNext ...
            'orgId': item['orgId'],
            'columnId': item['columnId'],
            'associateAnnouncement': item['associateAnnouncement'],
        }
        return result

    def save(self, data):
        """Persist one parsed record; return a status code consumed by _run().

        Intended codes (per the commented-out Mongo logic below):
        0 = inserted new, 1 = appended a new type to an existing record,
        -1 = already fully stored (signals _run() to stop).
        Currently a DEBUG STUB: prints the record and always returns -1.
        """
        print data
        return -1
        # data['updateTime'] = now()
        # old_data = MONGO[DB_NAME][COLL_NAME].find_one({'_id': data['_id']})
        # if not old_data:
        #     data['type'] = [data['type']]
        #     MONGO[DB_NAME][COLL_NAME].insert_one(data)
        #     rst = 0
        # elif data['type'] not in old_data['type']:
        #     data['type'] = old_data['type'] + [data['type']]
        #     MONGO[DB_NAME][COLL_NAME].find_one_and_update(
        #         {'_id': data['_id']},
        #         {'$set': data}
        #     )
        #     rst = 1
        # else:
        #     rst = -1
        # return rst

    def _run(self):
        """Crawl pages for this category within the current 'seDate' window.

        Returns None when the listing is exhausted or save() signals stop
        (-1).  Returns the last parsed announcement on reaching page 1000 so
        run() can re-anchor the date window and continue -- presumably the
        site caps pagination around there (TODO confirm).
        """
        while True:
            logger.info('now page : %s' % self.page)
            self.post_data['pageNum'] = self.page
            # self.post is provided by BaseCrawler; returns the response body text.
            data = self.post(QUERY_URL, data=self.post_data, headers=POST_HEADERS)
            annos = json.loads(data)['announcements']
            if not annos:  # null/empty list: past the last page
                break
            for index, item in enumerate(annos, start=1):
                anno = self.parse(item)
                rst = self.save(anno)
                if rst == -1:  # record already stored -> older ones are too
                    return
                if rst == 1:
                    pass
            if self.page == 1000:
                logger.info('last %s' % anno)
                return anno  # hand the last record to run() for re-anchoring
            if len(annos) != 50:  # short page == final page
                return
            self.page += 1

    def run(self):
        """Drive _run() in batches until the category is fully crawled.

        Whenever _run() hits the 1000-page cap, restart from page 1 with the
        'seDate' window re-anchored near the last crawled announcement's date.
        """
        while True:
            rst = self._run()
            if not rst:
                return
            self.page = 1
            self.post_data['seDate'] = self.build_date(rst['announcementTime'])


def main():
    """Entry point: currently a one-shot debug crawl of a single category."""
    # Debug run: continuous-disclosure announcements only.
    crawler = StockCrawler('category_cxpl_lwts')
    crawler.run()
    # Disabled production loop: sweep every category forever, padding each
    # sweep out to at least 30 minutes before the next one starts.
    # for loop_count in forever(1):
    #     logger.info('now loop_count %s' % loop_count)
    #     before = time.time()
    #     for category in CATEGORY:
    #         logger.info('crawling %s-%s' % (category, CATEGORY[category]))
    #         StockCrawler(category).run()
    #     end = time.time()
    #
    #     if end - before < 1800:
    #         logger.info('task done, sleeping... %s' % (end - before))
    #         time.sleep(1800 - end + before)

# Run the crawler when executed as a script (no-op on import).
if __name__ == '__main__':
    main()


#
#
# {
#     "plate": {
#         "sz": "深市公司",
#         "szmb": "深市主板",
#         "szzx": "中小板",
#         "szcy": "创业板",
#         "shmb": "沪市主板"
#     },
#
#     "trade": {
#         "农、林、牧、渔业": "农、林、牧、渔业",
#         "采矿业": "采矿业",
#         "制造业": "制造业",
#         "电力、热力、燃气及水生产和供应业": "电力、热力、燃气及水生产和供应业",
#         "建筑业": "建筑业",
#         "批发和零售业": "批发和零售业",
#         "交通运输、仓储和邮政业": "交通运输、仓储和邮政业",
#         "住宿和餐饮业": "住宿和餐饮业",
#         "信息传输、软件和信息技术服务业": "信息传输、软件和信息技术服务业",
#         "金融业": "金融业",
#         "房地产业": "房地产业",
#         "租赁和商务服务业": "租赁和商务服务业",
#         "科学研究和技术服务业": "科学研究和技术服务业",
#         "水利、环境和公共设施管理业": "水利、环境和公共设施管理业",
#         "居民服务、修理和其他服务业": "居民服务、修理和其他服务业",
#         "教育": "教育",
#         "卫生和社会工作": "卫生和社会工作",
#         "文化、体育和娱乐业": "文化、体育和娱乐业",
#         "综合": "综合"
#     }
# }
