#!/usr/bin/env python
# -*- coding:utf-8 -*-
import hashlib
import json
import sys
import time

from pymongo import MongoClient
from scpy.logger import get_logger
from scpy.qiniu import Qiniu
from scpy.xawesome_codechecker import no_exception
from scpy.xawesome_crawler import BaseCrawler
from xtls.timeparser import parse_time, now

from util import get_mongo_conf

# Python-2-only hack: make utf-8 the default codec for implicit
# str <-> unicode conversions throughout this module.
reload(sys)
sys.setdefaultencoding("utf-8")
logger = get_logger(__file__)

# Shared MongoDB connection for announcement storage.
# NOTE(review): MONGO_CONF[0]/[1] are presumably host and port — confirm
# against util.get_mongo_conf().
MONGO_CONF = get_mongo_conf()
MONGO = MongoClient(MONGO_CONF[0], MONGO_CONF[1])
DB_NAME = 'crawler_company_all'
# Qiniu object storage used for announcement attachments; the commented
# line below is the test-bucket configuration.
qiniu = Qiniu(bucket='sc-crawler', host='10.132.23.104')
# qiniu = Qiniu(bucket='sc-crawler-test', host='121.199.8.121')


def parse_time_stamp(time_stamp):
    return time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time_stamp))


def sha1(data):
    """Return the hex-encoded SHA-1 digest of ``data`` (a byte string)."""
    hasher = hashlib.sha1(data)
    return hasher.hexdigest()


class StockCrawler(BaseCrawler):
    """Crawler for the latest stock announcements on cninfo.com.cn.

    Fetches the paginated announcement list for one exchange ('szse' or
    'sse'), uploads each announcement attachment to Qiniu and stores the
    announcement metadata in the 'stockAnnouncement' MongoDB collection.
    """

    # The list endpoint is an XHR form POST.
    POST_LIST_HEADERS = {
        'X-Requested-With': 'XMLHttpRequest',
        'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
    }

    INDEX_URL = 'http://www.cninfo.com.cn/'
    LIST_URL = 'http://www.cninfo.com.cn/cninfo-new/disclosure/%s_latest'

    def __init__(self, market, page=1):
        """
        :param market: exchange id, 'szse' (Shenzhen) or 'sse' (Shanghai)
        :param page: 1-based list page to start from
        :raises ValueError: for any other market value
        """
        if market not in ['szse', 'sse']:
            raise ValueError('market must in [szse, sse], got <%s>' % market)
        super(StockCrawler, self).__init__(_market=market, logger=logger, list_url=StockCrawler.LIST_URL % market)
        # Form payload for the list endpoint; run() advances 'pageNum'.
        self.list_post_data = {
            'stock': '', 'searchkey': '', 'plate': '', 'category': '', 'trade': '',
            'tabName': 'latest', 'sortName': '', 'sortType': '', 'limit': '', 'showTitle': '',
            'pageSize': 30,
            'column': self._market,
            'columnTitle': u'%s市公告' % (u'沪', u'深')[self._market == 'szse'],
            'pageNum': page,
            'seDate': self.build_date()
        }

    @classmethod
    def build_date(cls):
        """Build the 'YYYY-MM-DD ~ YYYY-MM-DD' date-range filter for seDate.

        NOTE(review): the range is built as '<1 second from now> ~
        <1 day ago>' (later date first) — presumably the order cninfo
        expects; confirm before changing.
        """
        return ' ~ '.join(t.split(' ')[0] for t in [parse_time('1秒后'), parse_time('1天前')])

    def parse_items(self, companys):
        """Flatten the nested announcement lists and yield formatted items.

        :param companys: list of per-company lists of raw announcement
                         dicts, as found in 'classifiedAnnouncements'
        """
        for company in companys:
            for item in company:
                # May yield None: format_item swallows exceptions.
                yield self.format_item(item)

    @classmethod
    @no_exception(on_exception=None, logger=logger)
    def format_item(cls, item):
        """Map one raw cninfo announcement dict onto our storage schema.

        Returns None (via @no_exception's on_exception=None) when a field
        is missing or malformed.
        """
        result = {
            'stockCode': item['secCode'],
            'companyShortName': item['secName'],
            'announcementTitle': item['announcementTitle'],
            'announcementType': item['announcementTypeName'],
            # cninfo timestamps are in milliseconds.
            'announcementTime': parse_time_stamp(item['announcementTime'] / 1000),
            'announcementId': item['announcementId'],
            'filetype': str(item['adjunctType']).lower(),
            'downloadUrl': StockCrawler.INDEX_URL + item['adjunctUrl'],
        }
        return result

    def save_db(self, coll, data):
        """Insert or update ``data`` in collection ``coll``, keyed by '_id'.

        :returns: 0 on insert, 1 on update, -1 on any error (logged).
        """
        try:
            data['updateTime'] = now()
            collection = MONGO[DB_NAME][coll]
            if not collection.find_one({'_id': data['_id']}):
                collection.insert_one(data)
                return 0
            collection.update_one({'_id': data['_id']}, {'$set': data})
            return 1
        except Exception as e:  # was Py2-only 'except Exception, e'
            logger.exception(e)
            return -1

    def select_db(self, coll, filter):
        """Return the first document in ``coll`` matching ``filter``, or
        None on error (note: the parameter name shadows the builtin)."""
        try:
            return MONGO[DB_NAME][coll].find_one(filter)
        except Exception as e:  # was Py2-only 'except Exception, e'
            logger.exception(e)
            return None

    def deal_item(self, item):
        """Download one announcement attachment, upload it to Qiniu and
        persist the item; items already stored are skipped."""
        if self.select_db('stockAnnouncement', {'_id': item['announcementId']}):
            return
        content = self.get(item['downloadUrl'], timeout=30)
        # Upload keyed by the attachment's lower-cased file extension.
        rst = qiniu.upload(content, str(item['downloadUrl'][item['downloadUrl'].rfind('.'):]).lower())
        file_name = rst['_id']
        item['file'] = file_name
        item['_id'] = item['announcementId']
        item['source'] = self._market
        self.save_db('stockAnnouncement', item)

    def run(self):
        """Crawl list pages until the server reports no more results."""
        while True:
            data = self.post(self.list_url, data=self.list_post_data, headers=self.POST_LIST_HEADERS)
            data = json.loads(data)

            companys = data['classifiedAnnouncements']
            if not companys:
                break
            # (Dropped the unused enumerate() index the original kept here.)
            for item in self.parse_items(companys):
                if not item:
                    continue
                logger.info(u'now item [%s]' % json.dumps(item, ensure_ascii=False))
                self.deal_item(item)
            if not data['hasMore']:
                break
            self.list_post_data['pageNum'] += 1


def main():
    """Run one crawl pass per supported exchange, Shanghai then Shenzhen."""
    markets = ('sse', 'szse')
    for market in markets:
        StockCrawler(market).run()


# Script entry point: crawl both exchanges once.
if __name__ == '__main__':
    main()
