#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
__author__ : xyh
__date__: 2016-09-14
爬虫基类： 巨潮网, A股公告, 新三板公告, A股预披露
中小企业股转系统, 新三板预披露需要重写run方法
"""

import json
import sys
import traceback

from pymongo import MongoClient
from scpy.logger import get_logger
from scpy.qiniu import Qiniu
from scpy.xawesome_crawler import BaseCrawler
from xtls.timeparser import now, parse_time

# Python 2 codec hack: force the default encoding to utf-8 so implicit
# unicode/str conversions on Chinese announcement text do not raise.
reload(sys)
sys.setdefaultencoding("utf-8")
logger = get_logger(__file__)

# Shared MongoDB client used by every crawler subclass (see StockCrawler.save).
MONGO = MongoClient('10.132.23.104', 27017)

# Qiniu object-storage client used to archive announcement PDFs.
qiniu = Qiniu(bucket='sc-crawler', host='10.132.23.104')

# Headers required by the cninfo announcement query endpoint (AJAX form POST).
POST_HEADERS = {
    'X-Requested-With': 'XMLHttpRequest',
    'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
}


class StockCrawler(BaseCrawler):
    """Base crawler for cninfo.com.cn announcement feeds.

    Works as-is for: cninfo, A-share announcements, NEEQ announcements,
    A-share pre-disclosure.  The SME share-transfer system and NEEQ
    pre-disclosure crawlers must override :meth:`run`; every subclass
    must override :meth:`parse` and set ``self.collection``.
    """

    def __init__(self, post_data, date_range=None, page=1):
        """
        :param post_data: dict of form fields POSTed to the query endpoint
        :param date_range: optional ``'YYYY-MM-DD ~ YYYY-MM-DD'`` string;
                           defaults to "yesterday ~ today"
        :param page: first result page to fetch
        """
        # NOTE(review): attribute name keeps the historical 'qurey' typo on
        # purpose -- subclasses/callers may reference it by this name.
        self.qurey_url = 'http://www.cninfo.com.cn/cninfo-new/announcement/query'
        super(StockCrawler, self).__init__(page=page)
        self.db = 'stockProj'
        self.collection = ''  # subclasses must set their target collection

        # Split the range once instead of twice (original split per bound).
        parts = self.set_date_range(date_range).split('~')
        self.start_date = parts[0].strip()
        self.end_date = parts[1].strip()

        self.post_data = post_data
        self.name = 'base_crawler'

    def set_date_range(self, date_range):
        """Return *date_range* unchanged, or the default "yesterday ~ today".

        :param date_range: ``'YYYY-MM-DD ~ YYYY-MM-DD'`` or falsy
        :return: a date-range string, e.g. ``'2016-09-13 ~ 2016-09-14'``
        """
        if not date_range:
            return parse_time(u'1天前').split(' ')[0] + ' ~ ' + parse_time(u'1秒前').split(' ')[0]  # eg: '2016-09-13 ~ 2016-09-14'

        return date_range

# ----------------------------parse-------------------------------------
    def parse(self, item):
        """Parse one raw announcement item into a result dict.

        Must be overridden: each site has its own parsing rules.

        :param item: one raw announcement entry from the query response
        :return: dict with at least ``_id``, ``type``, ``downloadUrl``
                 (and ``announcementTime`` when available)
        :raises NotImplementedError: always, in this base class
        """
        raise NotImplementedError()

# -------------------------save-------------------------------------
    def save(self, data):
        """Persist one parsed announcement, merging with any stored copy.

        :param data: parsed announcement dict (must contain ``_id``)
        :return: 1 if a document with this ``_id`` already existed
                 (merged update), 0 if it was newly inserted
        """
        logger.info('%s-save item %s' % (self.name, data['_id']))
        data['updateTime'] = now()
        old_data = MONGO[self.db][self.collection].find_one({'_id': data['_id']})
        if old_data:
            data = self.complete_data(data, old_data)
            rst = 1
        else:
            data = self._upload_qiniu(data)
            rst = 0

        self._update_db(data, rst)

        return rst

    def complete_data(self, data, old_data):
        """Merge the fresh record with the previously stored one.

        Reuses the stored ``fileKey`` when present (avoids re-uploading the
        PDF) and unions the ``type`` lists.

        :param data: freshly parsed announcement
        :param old_data: document already stored in MongoDB
        :return: the merged *data* dict
        """
        if not old_data.get('fileKey'):
            data = self._upload_qiniu(data)
        else:
            data['fileKey'] = old_data.get('fileKey')

        if data['type'][0] not in old_data['type']:
            data['type'] = old_data['type'] + data['type']

        return data

    def _upload_qiniu(self, data):
        """Best-effort upload of the announcement PDF to qiniu storage.

        On success ``data['fileKey']`` is set to the stored object's id; on
        any failure the error is logged and *data* is returned unchanged.
        """
        try:
            if data['downloadUrl'].lower().endswith('pdf'):
                content = self.get(data['downloadUrl'])
                rst = qiniu.upload(content, suffix='pdf')
                data['fileKey'] = rst['_id']
        except Exception:
            # Best-effort: a failed upload must not abort the whole crawl.
            logger.exception(self.name)
        # BUGFIX: the return used to live in a `finally:` clause, which
        # silently swallowed even BaseException (KeyboardInterrupt etc.).
        return data

    def _update_db(self, data, rst):
        """Write *data* to MongoDB: upsert when new, update-in-place otherwise.

        :param rst: 0 for a new document, 1 for an existing one (see save())
        """
        if rst == 0:
            MONGO[self.db][self.collection].update_one({'_id': data['_id']}, {'$set': data}, upsert=True)
        else:
            MONGO[self.db][self.collection].find_one_and_update({'_id': data['_id']}, {'$set': data})

# ----------------------------------run-----------------------------------
# cninfo, A-share announcements, NEEQ announcements and A-share
# pre-disclosure use run() directly; the SME share-transfer system and
# NEEQ pre-disclosure crawlers must override it.
    def run(self):
        """Page through the query endpoint, parsing and saving every item.

        The site caps paging at 1000 pages, so past that limit the end date
        is moved back to the newest announcement seen and paging restarts.
        A page shorter than 50 items marks the last page.
        """
        total_record_num = self.check_total_record_num()
        if not total_record_num:
            # BUGFIX: was logger.exception(), but there is no active
            # exception here, so that logged a bogus "None" traceback.
            logger.error('[x] %s-No record' % self.name)
            return

        try:
            while True:
                logger.info('%s-now page : %s' % (self.name, self.page))
                self.post_data['pageNum'] = self.page
                self.post_data['seDate'] = '{} ~ {}'.format(self.start_date, self.end_date)
                data = self.post(self.qurey_url, data=self.post_data, headers=POST_HEADERS)
                annos = json.loads(data)['announcements']
                if not annos:
                    break
                for index, item in enumerate(annos, start=1):
                    anno = self.parse(item)
                    rst = self.save(anno)
                    if rst == 1:
                        logger.info('%s-onemore %s' % (self.name, anno['_id']))

                if self.page >= 1000:
                    # Site-side paging cap: shrink the window and restart.
                    self.end_date = self._recreate_end_date(annos)
                    self.page = 0

                if len(annos) != 50:
                    # Fewer than a full page (50 items) => last page.
                    return

                self.page += 1

        except Exception:
            logger.exception(self.name)

        finally:
            MONGO.close()

    def check_total_record_num(self):
        """Probe page 1 of the query and return the server-reported total.

        :return: ``totalRecordNum`` from the response, or None when the
                 request returned nothing / the field is absent
        """
        self.post_data['pageNum'] = 1
        data = self.post(self.qurey_url, data=self.post_data, headers=POST_HEADERS)
        if not data:
            return
        return json.loads(data).get('totalRecordNum')

    def _recreate_end_date(self, annos):
        """Return the date part of the first announcement carrying a time.

        Used by run() to narrow the search window once the 1000-page cap is
        hit.  Returns None when no item on the page has an
        ``announcementTime`` -- TODO confirm callers tolerate that.
        """
        for item in annos:
            anno = self.parse(item)
            time_str = anno.get('announcementTime', None)
            if time_str:
                return time_str.split(' ')[0].strip()

# -----------------------------END: class StockCrawler------------------------------------