#!/usr/bin/env python
# encoding=utf-8

import json
import sys
from random import randint
from time import time

from pymongo import MongoClient
from scpy.logger import get_logger
from scpy.qiniu import Qiniu
from xtls.basecrawler import BaseCrawler
from xtls.timeparser import now

from util import get_mongo_conf

# Python 2 hack: re-expose setdefaultencoding so implicit str<->unicode
# conversions use UTF-8 instead of ASCII (this API does not exist in Python 3).
reload(sys)
sys.setdefaultencoding('utf-8')

__author__ = 'xlzd'

logger = get_logger(__file__)

# JSONP search endpoint of the court bulletin site; `start` is the page
# number and `_` a cache-busting timestamp (see _gen_time below).
URL = 'http://rmfygg.court.gov.cn/psca/lgnot/solr/searchBulletinInterface.do?callback=jQuery15209624390960671008_1450418481765&start={page}&limit=16&wd=rmfybulletin&list%5B0%5D=bltntype%3A&_={time}'
# Direct download URL for a bulletin's attached PDF document.
DOC_URL = 'http://rmfygg.court.gov.cn/psca/lgnot/bulletin/download/{doc_id}.pdf'

# NOTE(review): assumes get_mongo_conf() returns (host, port) — confirm in util.
MONGO_CONF = get_mongo_conf()
MONGO = MongoClient(MONGO_CONF[0], MONGO_CONF[1])


def _gen_time():
    return "%s%s" % (int(time()), randint(100, 999))


class NoticeCrawler(BaseCrawler):
    """Incremental crawler for court bulletin notices (rmfygg.court.gov.cn).

    Pages through the site's JSONP search interface, normalizes each bulletin
    into a flat dict, downloads the attached PDF (if any) via ``save_file``
    and inserts the record into MongoDB.  The run stops when a page returns
    no results, or after more than 10 already-stored bulletins are seen
    (i.e. the crawl has reached previously covered territory).
    """

    def __init__(self):
        super(NoticeCrawler, self).__init__(page=1)
        # Spoof a desktop Chrome UA — presumably the site rejects or throttles
        # default client user agents (TODO confirm).
        self._request.headers['User-Agent'] = 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/47.0.2526.73 Safari/537.36'

    @classmethod
    def _deal_party(cls, string):
        """Split a party field on the Chinese enumeration comma, dropping empties.

        :param string: raw party field from the API (may be empty)
        :return: list of non-empty party names
        """
        # List comprehension instead of filter(): identical result in
        # Python 2, and still a list (not a lazy iterator) under Python 3.
        return [party for party in string.split(u'、') if party]

    @classmethod
    def parse(cls, item):
        """Normalize one raw bulletin object from the search API.

        :param item: raw JSON dict out of the response's ``objs`` array
        :return: document ready for MongoDB insertion; ``_id`` is the
                 bulletin number, ``documentFile`` is filled in later by
                 ``run`` once the PDF has been uploaded.
        """
        return {
            'type': item.get('bltntypename', ''),
            'province': item.get('province', ''),
            'judge': item.get('judge', ''),
            'judgePhone': item.get('judgephone', ''),
            'court': item.get('courtcode', ''),
            'publishPage': item.get('publishpage', ''),
            'dealGrade': item.get('dealgradename', ''),
            'content': item.get('content', ''),
            'caseNo': item.get('caseno', ''),
            'publishTime': item.get('publishdate', '') + ' 00:00:00',
            # Dispatch through cls (not the hardcoded class name) so a
            # subclass overriding _deal_party is honoured.
            'plaintiff': cls._deal_party(item.get('party1', '')),
            'defendant': cls._deal_party(item.get('party2', '')),
            'documentId': item.get('id', ''),
            '_id': item.get('bltnno', ''),
            'documentFile': '',
            'updateTime': now()
        }

    def run(self):
        """Crawl pages until the listing is exhausted or the dedup cut-off hits."""
        saved_count = 0
        while True:
            url = URL.format(page=self.page, time=_gen_time())
            logger.info('page: %s', self.page)  # lazy %-args: no eager formatting
            html = self.get(url)
            # Strip the JSONP wrapper "jQuery...( ... )": keep from the first
            # '{' and drop the trailing ')' so the payload parses as JSON.
            data = json.loads(html[html.find('{'):-1])

            # An empty result page means we've walked past the last page.
            if not data['objs']:
                return

            for item in data['objs']:
                item = self.parse(item)
                # Temporarily check both collections for duplicates
                # (original note: 临时从两张表查询).
                saved = MONGO['crawler_company_all']['chinacourtnotice'].find_one(filter={'_id': item['_id']})
                saved2 = MONGO['crawler_company_all']['chinacourtnoticenew'].find_one(filter={'_id': item['_id']})
                if saved or saved2:
                    saved_count += 1
                    # More than 10 known bulletins: assume everything older is
                    # already stored and end this incremental run.
                    if saved_count > 10:
                        return
                    continue
                if item['documentId']:
                    # Fetch the attached PDF and upload it before inserting,
                    # so the stored record already carries the file id.
                    document_data = self.get(DOC_URL.format(doc_id=item['documentId']))
                    filename = save_file(document_data)
                    item['documentFile'] = filename
                MONGO['crawler_company_all']['chinacourtnoticenew'].insert_one(item)
                logger.info('now item : %s', item['_id'])
            self.page += 1


def save_file(content):
    """Upload PDF bytes to the Qiniu bucket and return the stored file id.

    :param content: raw PDF document body as fetched from the court site
    :return: the ``_id`` assigned by the Qiniu upload service
    """
    # A fresh client per call keeps this helper stateless.
    client = Qiniu(bucket='sc-crawler', host='10.132.23.104')
    upload_result = client.upload(content, 'pdf')
    return upload_result['_id']


def main():
    """Entry point: perform one incremental crawl pass."""
    crawler = NoticeCrawler()
    crawler.run()


# Script entry point: run the crawler when executed directly.
if __name__ == '__main__':
    main()

