#!/usr/bin/env python
# -*- coding:utf-8 -*-

import re
import time

from base.gsxt_base_worker import GsxtBaseWorker
from common import util
from common.global_field import Model


class GsxtShanDongWorker(GsxtBaseWorker):
    """GSXT crawler worker for Shandong province.

    Searches the provincial publicity site for a company keyword, then
    downloads per-company detail pages (base info, shareholders, annual
    reports) and persists them through the base worker's model/DB helpers.

    NOTE(review): Python 2 module (`xrange`, `ur"..."` literal).  The
    request helper (`task_request`), `self.host`, `self.province`, the
    SEARCH_* status constants, logging, and `target_db`/`target_table`
    are all provided by `GsxtBaseWorker` and are not visible in this file.
    """

    def __init__(self, **kwargs):
        # All configuration is delegated to the base worker.
        GsxtBaseWorker.__init__(self, **kwargs)

    def get_token(self, session):
        """Fetch the site homepage and scrape the CSRF token from it.

        Tries up to 5 times, sleeping 3 seconds between attempts.
        Returns the token string, or None when the page could never be
        fetched or the hidden ``_csrf`` form field was never found.
        """
        for _ in xrange(5):
            url = "http://{host}/".format(host=self.host)
            r = self.task_request(session.get, url)
            if r is None:
                # Log: "failed to reach the homepage, cannot obtain token"
                self.log.error('访问主页失败了 无法获得token...')
                time.sleep(3)
                continue

            # The token is embedded as: <input name="_csrf" value="...">
            regex = ur"name=\"_csrf\" value=\"(.*?)\""
            search_list = re.search(regex, r.text)
            if search_list is not None:
                return search_list.group(1)
            time.sleep(3)

        self.log.error('没有匹配到 token ')  # "no token matched"
        return None

    def get_search_result_html(self, keyword, session):
        """Search the site for *keyword* and collect result identifiers.

        Returns a tuple ``(url_list, status)``: ``url_list`` is a list of
        ``{'pripid': ...}`` dicts (possibly empty) and ``status`` is one
        of the base worker's SEARCH_* constants.
        """
        url_list = []
        token = self.get_token(session)
        if token is None:
            return url_list, self.SEARCH_ERROR

        try:
            result_url = 'http://{host}/pub/query/'.format(host=self.host)
            # The search endpoint validates requests via this CSRF header.
            session.headers['X-CSRF-TOKEN'] = token
            post_data = {
                'keyword': keyword,
                'isjyyc': 0,  # presumably "operating abnormality" filter off — TODO confirm
                'isyzwf': 0   # presumably "serious violation" filter off — TODO confirm
            }
            result_resp = self.task_request(session.post, result_url, data=post_data)
            if result_resp is None:
                self.log.error('搜索列表失败....')  # "search-list request failed"
                return url_list, self.SEARCH_ERROR

            result_json = util.json_loads(result_resp.text)
            if result_json is None:
                # "JSON decoding failed" — response body is logged for diagnosis.
                self.log.error('json转换失败...{text}'.format(text=result_resp.text))
                return url_list, self.SEARCH_ERROR

            results = result_json.get('results', None)
            if results is None:
                self.log.error('没有对应的results字段信息')  # "no 'results' field present"
                return url_list, self.SEARCH_ERROR

            url_list = self.get_url_list(results)
            return url_list, self.SEARCH_SUCCESS if len(url_list) > 0 else self.SEARCH_NOTHING_FIND
        except Exception as e:
            # Boundary catch: any unexpected failure is logged and reported
            # as a search error instead of crashing the worker loop.
            self.log.exception(e)
        return url_list, self.SEARCH_ERROR

    # Business publicity / enterprise publicity: extract detail-page params.
    def get_url_list(self, result_json):
        """Build ``{'pripid': ...}`` param dicts from search result items.

        Items lacking a ``pripid`` field are skipped with a warning.
        """
        param_list = []
        for item in result_json:
            pri_pid = item.get('pripid', None)
            if pri_pid is None:
                self.log.warn('没有pripid 字段')  # "missing pripid field"
                continue

            param_list.append({'pripid': pri_pid})
        return param_list

    @staticmethod
    def get_company_name(text):
        """Parse the base-info JSON *text* and return the company name.

        Prefers ``jbxx.entname``, falling back to ``jbxx.traname``
        (presumably the trade name — TODO confirm).  Returns None when
        the JSON is invalid, ``jbxx`` is absent, or neither field holds
        a non-empty value; otherwise the name with whitespace stripped.
        """

        json_data = util.json_loads(text)
        if json_data is None:
            return None

        jbxx = json_data.get('jbxx', None)
        if jbxx is None:
            return None

        ent_name = jbxx.get('entname', None)
        if ent_name is None or ent_name == '':
            ent_name = jbxx.get('traname', None)
            if ent_name is None or ent_name == '':
                return None

        return ent_name.strip()

    def get_detail_html_list_qy(self, seed, session, pri_pid, flag, rank):
        """Download detail pages for one company and build a data model.

        *flag* selects the entity type in the URL path ('qy' vs 'gt' —
        the caller uses 'gt' as a fallback; exact semantics not visible
        here).  Returns the populated model, or None when the base-info
        page or the company name could not be obtained.  Shareholder and
        annual-report fetches are best-effort: a failure there is simply
        skipped and the model is returned without that section.
        """
        base_info_url = 'http://{host}/pub/jbxx/{flag}/{pripid}'.format(
            host=self.host, pripid=pri_pid, flag=flag)
        base_info = self.task_request(session.post, base_info_url)
        if base_info is None:
            # "failed to fetch base info"
            self.log.error('基础信息获取失败...{pripid}'.format(pripid=pri_pid))
            return None

        # Parse the company name out of the base-info response.
        company_name = self.get_company_name(base_info.text)
        if company_name is None:
            # "failed to obtain company info" — body logged for diagnosis.
            self.log.error('获取公司信息失败...{text}'.format(text=base_info.text))
            return None

        # Build the data model for this company.
        data = self.get_model(company_name, rank, seed, self.province)

        # The base-info payload also carries shareholder and change records.
        self.append_model(data, Model.base_info, base_info_url, base_info.text)

        # Fetch shareholder information (best-effort).
        url = 'http://{host}/pub/jsxx/{pripid}'.format(
            host=self.host, pripid=pri_pid)
        r = self.task_request(session.post, url)
        if r is not None:
            self.append_model(data, Model.shareholder_info, url, r.text)

        # Fetch annual-report information (best-effort).
        annual_url = 'http://{host}/pub/annual/{flag}/{pripid}'.format(
            host=self.host, pripid=pri_pid, flag=flag)
        annual_info = self.task_request(session.post, annual_url)
        if annual_info is not None:
            self.append_model(data, Model.annual_info, annual_url, annual_info.text, classify=Model.type_detail)

        return data

    def get_detail_html_list(self, seed, session, detail_list):
        """Fetch details for every search hit and persist them in batch.

        Each company is tried first with the 'qy' flag, then retried with
        'gt' on failure.  ``rank`` is a 1-based counter incremented only
        on success, so it reflects the company's position among the
        successfully fetched results.  Returns the number of records
        handed to ``insert_batch_data``.
        """
        data_list = []
        rank = 1
        for item in detail_list:
            try:
                pri_pid = item.get('pripid', None)
                data = self.get_detail_html_list_qy(seed, session, pri_pid, 'qy', rank)
                if data is not None:
                    rank += 1
                    data_list.append(data)
                    continue

                # "qy fetch failed; retrying with gt flag"
                self.log.warn('qy flag 获取企业信息失败 重试 gt flag..pripid = {pid}'.format(
                    pid=pri_pid))

                data = self.get_detail_html_list_qy(seed, session, pri_pid, 'gt', rank)
                if data is not None:
                    rank += 1
                    data_list.append(data)

                    # "gt fetch succeeded"
                    self.log.warn('gt flag 获取信息成功.. pripid = {pid}'.format(
                        pid=pri_pid))
                    continue

                # "gt fetch failed" — item is dropped.
                self.log.warn('gt flag 获取信息失败.. pripid = {pid}'.format(
                    pid=pri_pid))

            except Exception as e:
                # One bad item must not abort the whole batch.
                self.log.exception(e)

        self.target_db.insert_batch_data(self.target_table, data_list)
        return len(data_list)
