#!/usr/bin/env python
# encoding: utf-8
"""
@author: youfeng
@email: youfeng243@163.com
@license: Apache Licence
@file: www_zjcredit_gov_cn.py
@time: 2018/1/3 10:01
"""
import re
import sys

import click
import gevent.pool
from gevent import monkey
from pyquery import PyQuery

monkey.patch_all()

sys.path.append('..')
sys.path.append('../..')
from common import util
from ext.crawler_handler import CrawlerHandler
from logger import Logger

# Logging module
log = Logger('www_zjcredit_gov_cn.log').get_logger()


class Crawler(CrawlerHandler):
    """List-page crawler for www.zjcredit.gov.cn (Zhejiang credit site).

    Fetches the paginated company list in fixed-size segments, stores the
    raw list pages, and extracts company names as crawl seeds.
    """

    def __init__(self, store_tube, _log):
        super(Crawler, self).__init__(store_tube, _log)
        # Target host and the storage tables used by the base handler.
        self.host = 'www.zjcredit.gov.cn'
        self.web_page_table = 'web_page_www_zjcredit_gov_cn'
        self.seed_table = 'new_zhejiang_company_list_full_20180119'
        # _ids of segments already crawled in this process (in-memory dedup).
        self.finish_set = set()

    def parse_text(self, text):
        """Extract company names from a list-page response body.

        :param text: raw text of a list page
        :return: list of seed dicts ({'_id', '_in_time', 'crawl_status'})
        """
        # Company names appear as: J$<name>$$/corporation/eDetail.do?id=...
        # Raw string so regex escapes are explicit; the original trailing
        # '.*?' was dropped — a non-greedy tail at the end of a pattern
        # always matches the empty string and had no effect. The '.' in
        # 'eDetail.do' is now escaped so it matches a literal dot.
        regex = re.compile(r'J\$(.*?)\$\$/corporation/eDetail\.do\?id=')
        search_list = regex.findall(text)

        return [{
            '_id': company,
            '_in_time': util.get_now_time(),
            'crawl_status': 0,
        } for company in search_list]

    def get_token(self, session):
        """Fetch the search page and pull the hidden form token from it.

        :param session: session object obtained from get_session()
        :return: token string, or None on any failure
        """
        url = 'http://www.zjcredit.gov.cn/page/gjcx_b.jsp'

        session.headers = {
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
            'Accept-Encoding': 'gzip, deflate',
            'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8,zh-TW;q=0.7,ja;q=0.6',
            'Cache-Control': 'max-age=0',
            'Connection': 'keep-alive',
            'DNT': '1',
            'Host': 'www.zjcredit.gov.cn',
            'Upgrade-Insecure-Requests': '1',
            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36',
        }
        resp = self.task_request(session, self.host, session.get, url=url)
        if resp is None:
            self.log.error("获取token页面失败...")
            return None

        token = PyQuery(resp.text, parser='html').find('#token').attr('value')
        # attr() returns None when the element/attribute is missing; a real
        # token is a long opaque string, so anything short is treated as
        # bogus. (Replaces a Py2-only `basestring` isinstance check.)
        if token is None or len(token) <= 10:
            return None

        return token

    def get_total_num(self):
        """Query the site for the total number of company records.

        :return: total record count, or -1 on any failure. Callers retry
                 on -1, so every failure path must return -1 (the original
                 returned None on a failed POST, which escaped the caller's
                 `!= -1` retry check).
        """
        session = self.get_session(self.host)

        # A valid form token is required for the search POST; fail fast
        # instead of posting with token=None.
        token = self.get_token(session)
        if token is None:
            self.log.error("获取token页面失败...")
            return -1

        self.log.info("获得token = {}".format(token))
        # Search form: area code 33 = Zhejiang, all other filters empty.
        post_data = {
            'areaCode': '33',
            'uscc': '',
            'jgdm': '',
            'yyzz': '',
            'qymc': '',
            'frdb': '',
            'xyd': '',
            'token': token,
            'imageField2.x': '50',
            'imageField2.y': '10',
        }
        url = 'http://www.zjcredit.gov.cn/corporation/enterpriseList.do'
        resp = self.task_request(session, self.host, session.post, url=url, data=post_data)
        if resp is None:
            self.log.error("获取token页面失败...")
            return -1

        # The total is embedded in the page as a JS literal: totalRecord:<n>
        search_list = re.findall(r'totalRecord:(\d+)', resp.text)
        if search_list:
            self.log.info("获取总数目成功: {}".format(search_list[0]))
            return int(search_list[0])

        self.log.error("获取总数目失败...")
        return -1

    def crawl_process(self, start, end, total_num):
        """Fetch one list-page segment [start, end] and store its results.

        :param start: 1-based index of the first record in this segment
        :param end: index of the last record in this segment
        :param total_num: total record count (required by the list URL)
        :return: 1 on success, 3 if already crawled, -1 on failure
        """
        self.log.info("开始抓取范围: start = {} end = {}".format(start, end))
        url = 'http://www.zjcredit.gov.cn/page/corporation/enterpriseSearchProxy.jsp?startrecord={}&endrecord={}&perpage=8&totalRecord={}'.format(
            start, end, total_num)

        _id = '{}_{}'.format(start, end)

        if _id in self.finish_set:
            self.log.info("已经抓取过: start = {} end = {}".format(start, end))
            return 3

        # NOTE(review): a persistent dedup check against self.web_page_table
        # was disabled here; only the in-memory finish_set dedup is active.

        session = self.get_session(self.host)
        session.headers = {
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
            'Accept-Encoding': 'gzip, deflate',
            'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8,zh-TW;q=0.7,ja;q=0.6',
            'Cache-Control': 'max-age=0',
            'Connection': 'keep-alive',
            'DNT': '1',
            'Host': 'www.zjcredit.gov.cn',
            'Upgrade-Insecure-Requests': '1',
            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.84 Safari/537.36',
        }

        resp = self.task_request(session, self.host, session.get, url=url)
        if resp is None:
            self.log.info("抓取失败: start = {} end = {}".format(start, end))
            return -1

        data = {
            '_id': _id,
            'text': resp.text,
            '_in_time': util.get_now_time(),
        }

        # A page with zero parsed companies is treated as a failed fetch
        # (deliberate: the caller re-queues -1 results for another pass).
        result_list = self.parse_text(resp.text)
        if len(result_list) > 0:
            self.save_seed_batch(self.seed_table, result_list)
            self.save_list_page_info(self.web_page_table, data)

            self.finish_set.add(_id)
            self.log.info("抓取成功: start = {} end = {}".format(start, end))

            return 1

        self.log.info("抓取失败: start = {} end = {}".format(start, end))
        return -1


@click.command()
@click.option('--total',
              default=1989891,
              type=int,
              help='进程数目')
def main(total):
    """Entry point: crawl all list pages in fixed-size segments, retrying
    failed segments in full passes until every segment succeeds.

    :param total: fallback total record count when the site query fails
    """
    store_tube = 'gsxt_store'
    thread_num = 1
    # gevent coroutine pool (size 1 keeps requests sequential)
    pool = gevent.pool.Pool(thread_num)
    log.info('启用协程...')
    log.info('当前开启协程数目: thread_num = {}'.format(thread_num))

    # Number of records requested per list-page request.
    segment = 1000

    times = 0
    crawler = Crawler(store_tube, _log=log)

    # Try a few times to read the real total from the site.
    total_num = -1
    for _ in xrange(5):
        total_num = crawler.get_total_num()
        if total_num is not None and total_num > 0:
            break

    # Fall back to the --total option when the site query failed. The None
    # guard also covers get_total_num() implementations that return None
    # on failure instead of -1.
    if total_num is None or total_num <= 0:
        total_num = total

    crawl_flag = True
    while crawl_flag:
        crawl_flag = False
        start = 1
        result_list = []
        while start + segment <= total_num:
            end = start + segment
            result_list.append(pool.apply_async(crawler.crawl_process, args=(start, end, total_num,)))
            start = end + 1

        # Final partial segment. '<=' (not '<') so the very last record is
        # not dropped when exactly one record remains (start == total_num).
        if start <= total_num:
            result_list.append(pool.apply_async(crawler.crawl_process, args=(start, total_num, total_num,)))

        for item in result_list:
            result = item.get()
            if result == -1:
                # At least one segment failed -> schedule another full pass.
                crawl_flag = True
            log.info("当前任务结果: result = {}".format(result))

        pool.join()
        # Count completed passes (the original never incremented this, so
        # the log always printed 0).
        times += 1
        log.info("当前抓取次数: times = {}".format(times))

    log.info("抓取完成..")


if __name__ == '__main__':
    main()
