#!/usr/bin/env python
# encoding=utf-8

import copy
import re
import sys
from time import sleep

import pymongo
from scpy.logger import get_logger
from xtls.basecrawler import BaseCrawler
from xtls.codehelper import timeit, no_exception
from xtls.timeparser import parse_time, now
from xtls.util import BeautifulSoup

logger = get_logger(__file__)

# Python 2 idiom: force the process-wide default encoding to UTF-8 so
# implicit str<->unicode conversions of Chinese text do not raise
# UnicodeDecodeError.
reload(sys)
sys.setdefaultencoding("utf-8")

# MongoDB connection used both for crawled patents and for crawl progress.
MONGO = pymongo.MongoClient('10.132.23.104', 27017)
# MONGO = pymongo.MongoClient('127.0.0.1', 27017)
# Target database / collection for crawled patent documents.
DB, COLL = 'crawler_company_all', 'patent2'
# Maps the Chinese field labels scraped from the page to the English keys
# used in the stored documents.
KEY_VALUE_PAIR = {
    u'申请公布号': u'applyPubNum',  # application publication number
    u'申请公布日': u'applyPubDate',  # application publication date
    u'申请号': u'applyNum',  # application number
    u'申请日': u'applyDate',  # application date
    u'申请人': u'applyPerson',  # applicant
    u'发明人': u'inventionPerson',  # inventor
    u'授权公告号': u'authPubNum',  # grant announcement number
    u'授权公告日': u'authPubDate',  # grant announcement date
    u'地址': u'address',  # address
    u'分类号': u'classificationNumber',  # classification number
    u'专利权人': u'patentRightPerson',  # patentee (patent right holder)
    u'设计人': u'designer',  # designer
}
# Template document for one patent; deep-copied for every parsed item so
# absent fields default to empty strings.
PATENT_FORMAT = {
    'image': '', 'type': '', 'title': '',
    'description': '', 'downloadArgs': [], 'qrcode': '',
    KEY_VALUE_PAIR[u'地址']: '',
    KEY_VALUE_PAIR[u'申请公布号']: '',
    KEY_VALUE_PAIR[u'申请公布日']: '',
    KEY_VALUE_PAIR[u'申请号']: '',
    KEY_VALUE_PAIR[u'申请日']: '',
    KEY_VALUE_PAIR[u'申请人']: '',
    KEY_VALUE_PAIR[u'发明人']: '',
    KEY_VALUE_PAIR[u'授权公告号']: '',
    KEY_VALUE_PAIR[u'授权公告日']: '',
    KEY_VALUE_PAIR[u'分类号']: '',
    KEY_VALUE_PAIR[u'专利权人']: '',
    KEY_VALUE_PAIR[u'设计人']: '',
}

# Search endpoint of the SIPO patent publication site.
QUERY_URL = 'http://epub.sipo.gov.cn/patentoutline.action'
# Desktop Firefox user agent used to avoid trivial bot filtering.
UA_FF = 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:37.0) Gecko/20100101 Firefox/37.0'


class PatentCrawler(BaseCrawler):
    """Crawler for patents published on epub.sipo.gov.cn (SIPO).

    Iterates the result pages for a given IPC classification, parses each
    patent entry and upserts it into MongoDB.
    """

    def __init__(self, ipc, page):
        # `page` may arrive as a string (it comes from MongoDB / argv);
        # normalize it to int so `self.page += 1` works.
        super(PatentCrawler, self).__init__(ipc=ipc, page=int(page))
        self._request.headers['User-Agent'] = UA_FF

    @classmethod
    @no_exception(None, logger)
    def deal_classification_number(cls, li):
        """Parse the classification-number <li> (labelled '分类号').

        :param li: BeautifulSoup <li> element for the field.
        :return: dict with the raw text plus extracted agency, agent and
                 priority fields (empty strings when absent); None on any
                 exception (suppressed by @no_exception).
        """
        text = li.getText().strip()[4:]  # drop the leading '分类号：' label
        result = {
            'source': text,
            'number': text,
            'patentAgency': '',  # patent agency (专利代理机构)
            'agent': '',  # agent (代理人)
            'priority': '',  # priority (优先权)
        }
        ul = li.find('ul')
        if not ul:
            return result
        result['number'] = text.split('\n')[0].replace(u'全部', '').strip()
        for sub_li in ul.find_all('li'):
            temp = sub_li.getText().strip()
            # Each label is followed by a full-width colon, hence the
            # len(label) + 1 slice offsets below.
            if temp.startswith(u'专利代理机构'):
                result['patentAgency'] = temp[7:]
            elif temp.startswith(u'代理人'):
                result['agent'] = temp[4:]
            elif temp.startswith(u'优先权'):
                result['priority'] = temp[4:]
            else:
                pass
                # logger.info('deal_classification_number: >> %s <<' % temp)
        return result

    def parse_line(self, li):
        """Parse one 'label：value' <li> into (english_key, value).

        :return: (key, value) tuple, or None for empty / unknown labels.
        """
        # Split on the FIRST full-width colon only, so a value that itself
        # contains '：' is not truncated (the old unbounded split dropped
        # everything after the second colon).
        text = li.getText().strip().split(u'：', 1)
        if len(text) < 2:
            return None
        if u'分类号' == text[0]:
            return KEY_VALUE_PAIR[text[0]], self.deal_classification_number(li)
        try:
            return KEY_VALUE_PAIR[text[0]], text[1]
        except KeyError:
            # Label not in KEY_VALUE_PAIR: unknown field, skip it.
            # (Was a bare `except:`, which in Python 2 also swallows
            # KeyboardInterrupt/SystemExit.)
            pass
        return None

    def parse_item(self, soup):
        """Parse one patent entry (<div class="cp_box">) into a dict.

        :raises Exception: when the main info container is missing.
        """
        patent_item = copy.deepcopy(PATENT_FORMAT)
        img = soup.find('div', attrs={'class': 'cp_img'}).find('img')['src']
        if img != 'images/cp_noimg.jpg':  # placeholder means "no image"
            patent_item['image'] = 'http://epub.sipo.gov.cn/' + img
        main_soup = soup.find('div', attrs={'class': 'cp_linr'})
        if not main_soup:
            raise Exception('no patent info found.')
        # The heading looks like '[type] title'.
        patent_item['type'], patent_item['title'] = map(
            lambda x: x.strip(),
            re.findall(r'\[(.+?)\](.+)', main_soup.find('h1').getText().strip())[0])
        try:
            text = soup.find('div', attrs={'class': 'cp_jsh'}).getText().strip()
            if text.startswith(u'摘要'):
                text = text.replace(u'摘要：', '')
            elif text.startswith(u'简要说明'):
                text = text.replace(u'简要说明：', '')
            if text.endswith(u'全部'):
                # Strip the trailing '全部' ("show all") link caption.
                text = text[:-2]
            patent_item['description'] = text.strip()
        except Exception as e:  # `as` form works on Py2.6+ and Py3
            logger.exception(e)

        for li in main_soup.find('ul').findChildren('li'):
            text = li.getText().strip()
            if not text:
                continue
            res = self.parse_line(li)
            if res:
                patent_item[res[0]] = res[1]
        try:
            patent_item['downloadArgs'] = re.findall(
                r"\('(.+?)'.+?'(.+?)'.+?'(.+?)'\)",
                soup.find('p', attrs={'class': 'cp_botsm'}).find('a')['href'])[0]
        except (AttributeError, TypeError, KeyError, IndexError):
            pass  # no download link on this entry; keep the default []
        try:
            patent_item['qrcode'] = 'http://epub.sipo.gov.cn/%s' % \
                                    soup.find('a', attrs={'class': 'qrcode'}).find('img')['src']
        except (AttributeError, TypeError, KeyError):
            pass  # no QR code on this entry; keep the default ''
        for key in ('applyDate', 'applyPubDate', 'authPubDate'):
            patent_item[key] = parse_time(patent_item[key])
        return patent_item

    @classmethod
    def save(cls, item):
        """Upsert `item` into MongoDB, keyed by its application number."""
        item['updateTime'] = now()
        item['_id'] = item['applyNum']
        return MONGO[DB][COLL].find_one_and_update(
            {'_id': item['_id']}, {'$set': item}, upsert=True)

    def run_page(self, page):
        """Crawl one result page.

        :return: True while a next page exists, False when the expected
                 layout is missing.
        :raises RuntimeError: after 3 failed fetch attempts (IP banned).
        """
        logger.info('now page: %s-%s' % (self.ipc, page))
        post_param = {
            'showType': 1, 'strWord': u'分类号=\'%\'',
            'numSortMethod': 0, 'strLicenseCode': '', 'selected': self.ipc,
            'numFMGB': '', 'numFMSQ': '', 'numSYXX': '',
            'numWGSQ': '', 'pageSize': 10, 'pageNow': page,
        }
        # Up to 3 attempts; a None response means we are being throttled,
        # so back off for 32 minutes before retrying.
        for _ in range(3):
            html = self.post(QUERY_URL, post_param, timeout=20)
            if html is None:
                logger.info('sleeping')
                sleep(32 * 60)
                continue
            break
        else:
            raise RuntimeError(u'ip 被封了')

        soup = BeautifulSoup(html)

        main_soup = soup.find('div', attrs={'class': 'w790 right'})
        if not main_soup:
            # Unexpected page layout: log it for diagnosis (was a bare
            # `print`, which bypassed the logging pipeline) and stop paging.
            logger.warning(soup)
            logger.warning(post_param)
            return False
        for item in main_soup.find_all('div', attrs={'class': 'cp_box'}):
            self.save(self.parse_item(item))
        # The last pager link still contains '>' while a next page exists.
        return '>' in main_soup.find('div', attrs={'class': 'next'}).find_all('a')[-1].getText()

    def run(self):
        """Crawl pages starting at self.page; return the last page reached."""
        while self.run_page(self.page):
            self.page += 1
        return self.page


@timeit(logger)
def main(ipc):
    """Resume crawling `ipc` from its last recorded page, then persist progress.

    :param ipc: IPC classification code whose patents should be crawled.
    :raises RuntimeError: when no progress record exists for `ipc`
            (previously this surfaced as an opaque TypeError on the
            unchecked `find_one(...)['last']` lookup).
    """
    record = MONGO['crawler_log']['patent_pages'].find_one({'_id': ipc})
    if record is None:
        raise RuntimeError('no patent_pages record for ipc %s' % ipc)
    last_page = record['last']
    curr_page = PatentCrawler(ipc, last_page).run()
    if last_page == curr_page:
        return  # no new pages were crawled; nothing to persist
    MONGO['crawler_log']['patent_pages'].update_one(
        {'_id': ipc},
        {'$set': {'_id': ipc, 'last': curr_page, 'updateTime': now()}}
    )


if __name__ == '__main__':
    # Usage: python <script> <ipc-code>
    main(sys.argv[1])
