#!/usr/bin/env python
# encoding: utf-8
"""
@author: youfeng
@email: youfeng243@163.com
@license: Apache Licence
@file: search_page_crawler.py
@time: 2017/12/27 21:42
@列表页抓取功能
"""
from common import util
from config.app_conf import SEARCH_LIST_FAIL
from ext.crawler_handler import CrawlerHandler


# 列表页抓取功能 (list-page crawling)
class CompanyPageCrawler(CrawlerHandler):
    """Crawls the paginated company list from the Jiangsu AIC site
    (www.jsgsj.gov.cn) and stores each company name as a seed record.
    """

    def __init__(self, store_tube, seed_table, log):
        """
        :param store_tube: storage tube, passed through to CrawlerHandler
        :param seed_table: table that receives the extracted seed records
        :param log: logger instance
        """
        super(CompanyPageCrawler, self).__init__(store_tube, log)

        # seed table the extracted company names are written into
        self.seed_table = seed_table

        # target host (used for session setup and request bookkeeping)
        self.host = 'www.jsgsj.gov.cn:58888'

        # list-page endpoint: POST, returns JSON with a "data" array
        self.seed_url = 'http://www.jsgsj.gov.cn:58888/province/NoticeServlet.json?queryXukeNoticeList=true'

    def crawl_list_page(self, session, page):
        """Fetch one list page and store the company names found on it.

        :param session: session object obtained from get_session()
        :param page: 1-based page number to fetch
        :return: 1 when at least one seed record was stored, 0 otherwise
                 (request failure, bad JSON, or no usable data)
        """
        post_data = {
            'corpName': '',
            'pageSize': 10,
            'curPage': page,
            'sortName': '',
            'sortOrder': '',
        }

        resp = self.task_request(session, self.host, session.post, self.seed_url, data=post_data)
        if resp is None:
            return 0

        json_data = util.json_loads(resp.text)
        if json_data is None:
            return 0

        data_list = json_data.get("data")
        if not isinstance(data_list, list):
            return 0

        # Collect non-empty company names; the name doubles as the _id,
        # so duplicates collapse at the storage layer.
        result_list = []
        for item in data_list:
            # guard against malformed entries: skip non-dict elements
            # instead of letting item.get() raise and abort the page
            if not isinstance(item, dict):
                continue

            company = item.get('CORP_NAME')
            if not isinstance(company, basestring):
                continue

            company = company.strip()
            if company == '':
                continue

            result_list.append({
                '_id': company,
                'in_time': util.get_now_time(),
            })

        if self.save_seed_batch(self.seed_table, result_list) > 0:
            # success path: use info level (was mistakenly logged via log.error)
            self.log.info("存储数据数目为: page = {} length = {}".format(page, len(result_list)))
            return 1
        self.log.error("没有提取到数据: page = {}".format(page))
        return 0

    # crawl one list page (抓取列表页)
    def crawl_process(self, page):
        """Crawl entry point for a single page.

        :param page: page number to crawl
        :return: (status, page) tuple; status is crawl_list_page()'s result,
                 or SEARCH_LIST_FAIL when the request raised an exception
        """
        self.log.info("开始抓取企业: page = {}".format(page))
        session = self.get_session(self.host)
        try:
            status = self.crawl_list_page(session, page)
        except Exception as e:
            status = SEARCH_LIST_FAIL
            self.log.error("请求列表页错误: page = {}".format(page))
            self.log.exception(e)

        self.log.info("抓取状态: page = {} status = {}".format(page, status))
        return status, page
