#!/usr/bin/env python 
# coding:utf-8
# @Time :10/16/18 16:53

import base64
import copy
import json
import sys

import click
import requests

sys.path.append("..")
sys.path.append("../..")
sys.path.append("../../..")
from proxy.proxy_type import ProxyType

from ext.task_base import TaskBase
from logger import AppLogger
from mq.mq_factory import MQFactory

from lxml import html
from PIL import Image

# Module-level logger; writes to jinan_new_register_new.log via the project AppLogger.
logger = AppLogger('jinan_new_register_new.log').get_logger()


class JiNanNewRegisterNew(TaskBase):
    """Crawler for newly-registered companies listed on jncredit.jinan.gov.cn.

    Flow: fetch listing pages -> extract company names from the table ->
    when a name is truncated with "...", download the record's detail image,
    crop the name region and recognize it with Baidu's general OCR API ->
    push every name as a clue onto the gsxt beanstalk queue.
    """

    # Listing page URL; the 1-based page number is substituted into {}.
    __START_URL = "http://jncredit.jinan.gov.cn/jnxy/jnxy/publicSigns/xzxk_show.do?deviceCode=999940&keyword=&pageno={}"

    # Detail image URL for one record; the record id is substituted into {}.
    __IMG_URL = "http://jncredit.jinan.gov.cn/jnxy/jnxy/publicSigns/to_photoindex.do?id={}&nodename=%E5%B8%82%E5%B7%A5%E5%95%86%E5%B1%80"

    # Baidu OCR OAuth token endpoint and the general-OCR endpoint.
    __BAIDU_TOKEN_URL = "https://aip.baidubce.com/oauth/2.0/token?grant_type=client_credentials&client_id={AK}&client_secret={CK}"
    __BAIDU_OCR_URL = "https://aip.baidubce.com/rest/2.0/ocr/v1/general"

    # Template for the clue pushed to the new gsxt clue queue.
    __GSXT_DATA = {
        "province": u"山东",
        "company": None,
        "source_url": u"www.jncredit.gov.cn",
        "register_code": None,
    }

    # Baidu OCR credentials.
    # NOTE(review): credentials are hard-coded in source — consider moving to config.
    __BAIDU_AK = "lj7bGQB5vM1KmK9DHTPHVsjQ"
    __BAIDU_CK = "VNDY9M79qpX75hpN9uM7EUdgd16xQbcU"

    def __init__(self, search_page, log):
        """
        :param search_page: number of pages to crawl; 0 means "all pages"
                            (with the page-count parser disabled, 0 currently
                            causes start() to log a warning and exit early).
        :param log: project logger instance.
        """
        super(JiNanNewRegisterNew, self).__init__(log)
        self._reset_beanstalk_handler(MQFactory.get_gsxt_clue_beanstalk_handler(log))
        self.__search_page = search_page
        self.log.info("爬取 {} 页的数据...".format(search_page if search_page != 0 else '所有'))
        self.__set_headers()
        self.__access_token = self.__get_baidu_token()

    def __set_headers(self):
        """Install browser-like default headers on the proxy-requests session."""
        headers = {
            "Host": "www.jncredit.gov.cn",
            "Connection": "keep-alive",
            "Cache-Control": "max-age=0",
            "Upgrade-Insecure-Requests": "1",
            "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36",
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
            "Accept-Encoding": "gzip, deflate",
            "Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8,zh-TW;q=0.7,ja;q=0.6",
        }
        self._proxy_requests.set_header(headers)

    def __get_page_data(self, url):
        """Fetch one listing page and push every company name found as a clue.

        :param url: fully-formatted listing-page URL.
        :return: total number of pages to iterate (currently the configured
                 ``search_page``), or -1 on any fetch/parse failure.
        """
        resp = self._proxy_requests.get(url, proxy_type=ProxyType.KUNPENG_DYNAMIC)

        if resp is None:
            self.log.warn("__get_page_data 未能正常请求页面...")
            return -1

        html_resp = html.fromstring(resp.text)
        # Data rows are the siblings following the header row containing "行政相对人名称".
        company_extract = html_resp.xpath(u'//th[contains(text(), "行政相对人名称")]/../following-sibling::*')

        if not company_extract:
            self.log.warn("__get_page_data 页面未提取到 company_extract 数据，当前url={}".format(url))
            return -1

        if len(company_extract) <= 1:
            self.log.warn("__get_page_data 未能拿到数据...请校验页面信息，当前url={}".format(url))
            return -1

        for per_company_extract in company_extract:
            name_nodes = per_company_extract.xpath(u'./td/a/text()')
            if not name_nodes:
                # Skip rows without a name link (e.g. pagination/footer rows);
                # the original code would raise IndexError here.
                continue
            company_name = name_nodes[0].strip()
            if "..." in company_name:
                # Name is truncated in the listing: recover the full name via
                # OCR of the record's detail image.
                sub_id = per_company_extract.xpath("./td/a/@href")[0].replace("to_photo.do?id=", "")
                ocr_name = self.__handle_company_name(sub_id)
                if ocr_name:
                    company_name = ocr_name
                    # The crop sometimes cuts off the closing ")" — restore it.
                    if "(" in company_name and ")" not in company_name:
                        company_name += ")"
                # On OCR failure keep the truncated name instead of crashing.

            gsxt_data = copy.deepcopy(self.__GSXT_DATA)
            gsxt_data["company"] = company_name
            self.log.info("当前采集到的新注册企业公司名为：{}".format(company_name))
            self.push_gsxt_clue_data(gsxt_data)

        # A pagination parser (__get_total_page) used to read the real page
        # count from the page; for now the configured page count is used.
        return self.__search_page

    def __handle_company_name(self, sub_id):
        """Recover the full company name by OCR-ing the record's detail image.

        :param sub_id: record id extracted from the listing link's href.
        :return: recognized name string, or None when download/OCR fails.
        """
        from io import BytesIO  # local import: only needed on the OCR fallback path

        img_resp = self._proxy_requests.get(self.__IMG_URL.format(sub_id),
                                            proxy_type=ProxyType.KUNPENG_DYNAMIC)
        if img_resp is None:
            self.log.warn("__handle_company_name 图片下载失败: sub_id={}".format(sub_id))
            return None

        # Crop the region holding the company name (empirically-determined
        # coordinates), keeping everything in memory instead of round-tripping
        # through a shared ./tmp.jpg file on disk.
        img = Image.open(BytesIO(img_resp.content))
        img_crop = img.crop((280, 180, 626, 210))
        crop_buf = BytesIO()
        img_crop.save(crop_buf, format="JPEG")
        return self.__ocr_img(crop_buf.getvalue())

    def __ocr_img(self, img_bytes):
        """Send raw JPEG bytes to Baidu general OCR and return the text.

        :param img_bytes: encoded image bytes of the cropped name region.
        :return: recognized text when exactly one line is recognized,
                 otherwise None.
        """
        form_data = {"image": base64.b64encode(img_bytes)}
        ocr_resp = requests.post(self.__BAIDU_OCR_URL,
                                 params={"access_token": self.__access_token},
                                 data=form_data,
                                 timeout=30).json()
        if ocr_resp.get("words_result_num") == 1:
            return ocr_resp["words_result"][0].get("words")
        self.log.warn("__ocr_img 识别结果异常: {}".format(ocr_resp))
        return None

    def __get_baidu_token(self):
        """Fetch an OAuth access token for the Baidu OCR API.

        :return: access-token string, or None when the response lacks one.
        """
        headers = {"Content-Type": "application/json; charset=UTF-8"}
        access_resp = requests.get(self.__BAIDU_TOKEN_URL.format(AK=self.__BAIDU_AK, CK=self.__BAIDU_CK),
                                   headers=headers,
                                   timeout=30)
        return access_resp.json().get("access_token")

    def start(self, *args, **kwargs):
        """Crawl page 1, then pages 2..total_page, pushing a clue per company."""
        total_page = self.__get_page_data(self.__START_URL.format(1))
        if total_page <= 0:
            self.log.warn("start 获取页码数信息异常，不进行遍历: total_page = {}".format(total_page))
            return

        # range instead of xrange: equivalent here and works on Python 2 and 3.
        for page in range(2, total_page + 1):
            try:
                self.log.info("start 当前采集页面: page = {}".format(page))
                result = self.__get_page_data(self.__START_URL.format(page))
                if result == -2:
                    # Reserved sentinel: stop when records pass the time cutoff.
                    # NOTE(review): no current code path returns -2.
                    self.log.info("start 采到时间截止，停止采集..")
                    break
                self.log.info("start 当前页面采集完成: page = {}".format(page))
            except Exception as e:
                self.log.error("start 当前页面采集失败: page = {}".format(page))
                self.log.exception(e)

        self.log.info("成功退出采集程序...")


@click.command()
@click.option('--search_page',
              default=100,
              type=int,
              help='采集页数, 0代表采集所有页数')
def main(search_page):
    """CLI entry point: build the crawler and run it for *search_page* pages."""
    try:
        crawler = JiNanNewRegisterNew(search_page, logger)
        crawler()
    except Exception as e:
        logger.error("采集异常退出: ")
        logger.exception(e)


# Script entry point (click parses --search_page from the command line).
if __name__ == '__main__':
    main()
