#!/usr/bin/env python 
# coding:utf-8
# @Time :9/26/18 14:24

import json
import re
import sys

import click

sys.path.append("..")
sys.path.append("../..")
sys.path.append("../../..")
from proxy.proxy_type import ProxyType

from ext.task_base import TaskBase
from logger import AppLogger

from lxml import html

# Module-level logger; writes to company_list_crawl.log via the project's AppLogger wrapper.
logger = AppLogger('company_list_crawl.log').get_logger()


class CompanyListCrawl(TaskBase):
    """Crawl the cninfo listed-company directory page.

    Downloads the company-list page, parses the four market sections
    (``<div id="con-a-1">`` .. ``<div id="con-a-4">``) and collects one
    record per company: its market type code, stock code and stock name.
    Optionally dumps the result to ``../company_list.json``.
    """

    __START_URL = "http://www.cninfo.com.cn/cninfo-new/information/companylist"

    # Each market section lives in a <div id="con-a-N"> container.
    __ID_DIV = "con-a-{}"

    # Hrefs look like ".../companyinfo_n.html?fulltext?shmb600252":
    # group 1 = market type code (letters), group 2 = stock code (digits).
    # Compiled once here instead of per-iteration inside the parse loop;
    # raw string avoids invalid-escape warnings on \? and \d.
    __COMPANY_URL_RE = re.compile(r'.*?fulltext\?([a-z]+)(\d+)')

    def __init__(self, log, save_json=False):
        """
        :param log: logger instance used for progress/error reporting
        :param save_json: when True, dump the parsed list to ../company_list.json
        """
        super(CompanyListCrawl, self).__init__(log)
        self.save_json = save_json
        self.__set_headers()

    def __set_headers(self):
        """Install browser-like request headers on the shared proxy session."""
        headers = {
            "Host": "www.cninfo.com.cn",
            "Connection": "keep-alive",
            "Upgrade-Insecure-Requests": "1",
            "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36",
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
            "Accept-Encoding": "gzip, deflate",
            "Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8,zh-TW;q=0.7,ja;q=0.6",
        }
        self._proxy_requests.set_header(headers)

    def __grab_company_list(self):
        """Download and parse the company list page.

        :return: list of dicts with keys ``type_code``, ``stock_code``,
                 ``stock_name``
        """
        self.log.info('__grab_company_list 采集开始...')
        response = self._proxy_requests.get(self.__START_URL, proxy_type=ProxyType.KUNPENG_DYNAMIC)

        company_info = []

        html_response = html.fromstring(response.text)

        for per_filed in range(1, 5):
            filed_id = self.__ID_DIV.format(per_filed)
            companies_extract = html_response.xpath('//div[@id="{}"]//li/a'.format(filed_id))
            for per_company_extract in companies_extract:
                # per_url: http://www.cninfo.com.cn/information/companyinfo_n.html?fulltext?shmb600252
                per_company_url = per_company_extract.xpath('./@href')[0]
                per_company_text = per_company_extract.xpath('./text()')[0]
                match = self.__COMPANY_URL_RE.match(per_company_url)
                if match is None:
                    # Malformed href: skip this entry instead of crashing the whole crawl.
                    self.log.warning('无法解析的公司链接: %s', per_company_url)
                    continue
                type_code, stock_code = match.groups()
                # Anchor text is "<stock_code> <name>"; strip the code to keep the name.
                stock_name = per_company_text.replace(stock_code, '').lstrip()
                company_item = {"type_code": type_code,
                                "stock_code": stock_code,
                                "stock_name": stock_name}
                company_info.append(company_item)

        if self.save_json:
            # Explicit UTF-8 so non-ASCII names survive regardless of the locale,
            # since the dump uses ensure_ascii=False.
            with open("../company_list.json", "w", encoding="utf-8") as f:
                json.dump(company_info, f, ensure_ascii=False)
        return company_info

    def start(self, *args, **kwargs):
        """Entry point invoked by the TaskBase framework; runs one crawl pass."""
        self.__grab_company_list()
        self.log.info("成功退出采集程序...")


@click.command()
@click.option('--save_json',
              default=True,
              type=bool,
              help='是否保存json数据至本地')
def main(save_json):
    """CLI entry point: run one company-list crawl pass."""
    try:
        crawler = CompanyListCrawl(logger, save_json=save_json)
        crawler()
    except Exception as e:
        logger.error("采集异常退出: ")
        logger.exception(e)


if __name__ == '__main__':
    # Script entry point; click handles command-line argument parsing.
    main()
