#!/usr/bin/env python
# encoding=utf-8

"""
上海证券交易所-监管措施 (Shanghai Stock Exchange - regulatory measures)
http://www.sse.com.cn/disclosure/credibility/measuress/
"""
import sys
from base64 import b64encode

from pymongo import MongoClient
from scpy.logger import get_logger
from xtls.basecrawler import BaseCrawler
from xtls.codehelper import no_exception
from xtls.timeparser import parse_time, now
from xtls.util import BeautifulSoup, sha1

from util import get_mongo_conf

# Python 2 only: force the process-wide default string encoding to UTF-8 so
# implicit str<->unicode conversions of the Chinese page text do not raise.
reload(sys)
sys.setdefaultencoding('utf-8')

logger = get_logger(__file__)
# Listing page of SSE (Shanghai Stock Exchange) regulatory measures.
LIST_URL = 'http://www.sse.com.cn/disclosure/credibility/measuress/'
# Stop a crawl run once more than THRESHOLD already-stored rows (or save
# errors) have been seen -- see SseCrawler.run().
THRESHOLD = 10
# NOTE(review): Mongo connection is opened as an import-time side effect;
# host/port come from the project config helper.
HOST, PORT = get_mongo_conf()
CONN = MongoClient(HOST, PORT)


class SseCrawler(BaseCrawler):
    """Crawl the SSE regulatory-measures list and upsert each row into MongoDB."""

    def __init__(self):
        # status = [new-inserts, updates-of-existing, save-errors];
        # maintained in run() from save()'s return codes.
        super(SseCrawler, self).__init__(status=[0, 0, 0])
        self._request.headers.update({'Host': 'www.sse.com.cn'})
        logger.info('init Sse Crawler done.')

    @no_exception(on_exception=None)
    def parse(self, item):
        """Parse one <tr> of the list table into a record dict.

        Returns None for header/short rows (fewer than 6 cells); any
        exception is swallowed by @no_exception and also yields None.
        """
        tds = item.find_all('td')
        if len(tds) < 6:
            return None
        link = tds[3].find('a')
        # Bug fix: an <a> tag without an href attribute used to raise
        # KeyError here, which @no_exception silently turned into a dropped
        # row. Use .get() so such rows are still saved (with empty remarks).
        href = link.get('href') if link else None
        if href and href.startswith('/'):
            link = 'http://www.sse.com.cn' + href
        else:
            # Missing or non-site-relative links are ignored.
            link = ''
        tds = [td.getText().strip() for td in tds]
        return {
            'stockCode': tds[0],  # stock code
            'companyShortName': tds[1],  # company short name
            'regulatoryMeasures': tds[2],  # regulatory measure taken
            'involvedSubject': tds[3],  # matter involved
            'involvedObject': tds[4],  # party involved
            'regulatoryMeasuresTime': parse_time(tds[5]),  # date measure was taken
            'sourceStockExchange': 'sse',
            'remarks': self.mark(link)  # remarks (detail page / attachment)
        }

    @no_exception(on_exception="")
    def mark(self, link):
        """Fetch the detail URL and return remark text ('' when unavailable).

        .doc/.docx attachments are returned base64-encoded; .shtml pages are
        scraped for their main text block; any other (or empty) link -> ''.
        Exceptions are converted to '' by @no_exception.
        """
        if not link:
            return ''
        # str.endswith accepts a tuple -- one call instead of an `or` chain.
        if link.endswith(('docx', 'doc')):
            return b64encode(self.get(link))
        if link.endswith('.shtml'):
            soup = BeautifulSoup(self.get(link))
            return soup.find('div', attrs={'class': 'block_l1'}).getText().strip()
        return ''

    @no_exception(on_exception=2, logger=logger)
    def save(self, data):
        """Upsert *data*; return 0 for a brand-new document, 1 for an existing one.

        On any exception @no_exception returns 2 instead (counted in status[2]).
        """
        # NOTE(review): the id hashes dict.values(), whose order is arbitrary
        # in Python 2 -- fragile as a dedup key, but changing the scheme would
        # orphan every existing _id, so it is deliberately kept as-is.
        # updateTime is added only after the hash so it never affects the id.
        data['_id'] = sha1(str(data.values()))
        logger.info('save item : %s' % data['_id'])
        data['updateTime'] = now()
        result = CONN['crawler_company_all']['regulatoryMeasures'].find_one_and_update(
            filter={'_id': data['_id']},
            update={'$set': data}, upsert=True)
        return 1 if result else 0

    def run(self):
        """Crawl the first list page, saving rows until mostly-known data appears."""
        html = self.get(LIST_URL)
        soup = BeautifulSoup(html)
        contents_soup = soup.find('div', attrs={'class': 'tab-contents'})
        table = contents_soup.find('table', attrs={'id': 'dateList_container'})
        for tr in table.find_all('tr'):
            data = self.parse(tr)
            if not data:
                continue
            result = self.save(data)
            self.status[result] += 1
            # Stop early once we have hit > THRESHOLD already-stored rows or
            # > THRESHOLD save errors -- the rest of the page is stale.
            if self.status[1] > THRESHOLD or self.status[2] > THRESHOLD:
                return


def main():
    """Entry point: run a single crawl pass over the SSE regulatory-measures list."""
    crawler = SseCrawler()
    crawler.run()


# Run the crawler only when executed as a script, not on import.
if __name__ == '__main__':
    main()
