# -*- coding: utf-8 -*-
#!/usr/bin/env python
import json

import requests

from base.task_base_worker import TaskBaseWorker
from common import util
from common.global_field import Model
from common.mongo import MongDb
from config.conf import captcha_geetest_conf, encry_conf


class GsxtBaseWorker(TaskBaseWorker):
    """Base worker for crawling GSXT (enterprise credit information) sites.

    Subclasses supply the site-specific behaviour by overriding the three
    hook methods ``home_page``, ``get_search_result_html`` and
    ``get_detail_html_list``; task bookkeeping, result-document assembly
    and the captcha/encryption helper-service calls live here.
    """

    # Outcome codes returned (as the second element) by get_search_result_html().
    SEARCH_ERROR = -1          # the search request itself failed
    SEARCH_NOTHING_FIND = 0    # search succeeded but matched no company
    SEARCH_SUCCESS = 1         # search returned at least one match

    def __init__(self, **kwargs):
        """Forward *kwargs* to TaskBaseWorker and ensure the MongoDB indexes exist."""
        TaskBaseWorker.__init__(self, **kwargs)

        # Indexes on the result (target) collection.
        self.target_db.create_index(self.target_table, [("rank", MongDb.ASCENDING)])
        self.target_db.create_index(self.target_table, [("seed", MongDb.ASCENDING)])
        self.target_db.create_index(self.target_table, [("in_time", MongDb.ASCENDING)])
        # Crawl-flag index on the task (source) collection.
        self.source_db.create_index(self.source_table, [(self.crawl_flag, MongDb.ASCENDING)])

        # Index the search-list collection as well, when one is configured.
        if self.search_table is not None:
            self.source_db.create_index(self.search_table, [(self.crawl_flag, MongDb.ASCENDING)])

    @staticmethod
    def get_model(_id, rank, seed, web_site, data_list=None):
        """Build a fresh result document for one company.

        :param _id: document id (the company name)
        :param rank: rank value stored on the document
        :param seed: seed the task originated from
        :param web_site: site identifier, stored under the ``website`` key
        :param data_list: optional initial ``datalist`` mapping; a new empty
            dict is created per call when omitted (avoids the shared
            mutable-default pitfall)
        :return: dict ready for insertion into the target collection
        """
        if data_list is None:
            data_list = {}
        return dict(_id=_id, rank=rank, seed=seed, website=web_site,
                    datalist=data_list, in_time=util.get_now_time())

    def append_model(self, model, data_type, url, text, year=None, classify=Model.type_list, post_data=None):
        '''
        Append one crawled page to *model* and publish it to the message queue.

        :param model: result document produced by get_model(); ignored when
            None or when it lacks an ``_id`` / dict ``datalist``
        :param data_type: section key, e.g. base_info or other_info
        :param url: URL of the crawled page
        :param text: raw page text
        :param year: optional annual-report year, stored with the page when given
        :param classify: sub-section key (list page vs detail page)
        :param post_data: optional POST payload; JSON-encoded and stored only
            when it is a dict (any other type is silently dropped)
        :return: None
        '''
        if model is None:
            return
        company = model.get('_id', None)
        if company is None:
            return

        # Publish the raw page to the message queue regardless of whether
        # it can be recorded in the document below.
        self.mq_queue_thread.push_msg(url, text, company=company, year=year)

        data_list = model.get('datalist', None)
        if not isinstance(data_list, dict):
            return

        detail_info = {'url': url, 'text': text}
        if year is not None:
            detail_info['year'] = year
        # isinstance already rejects None, so the former nested check is redundant.
        if isinstance(post_data, dict):
            detail_info['post_data'] = json.dumps(post_data)

        # Layout: datalist -> {data_type: {classify: [detail_info, ...]}}
        info = data_list.get(data_type, None)
        if info is None:
            data_list[data_type] = {classify: [detail_info]}
            return
        classify_list = info.get(classify, None)
        if classify_list is None:
            info[classify] = [detail_info]
            return

        classify_list.append(detail_info)

    def home_page(self, session):
        """Hook: warm up *session* on the site's home page. Default: no-op."""
        pass

    def get_search_result_html(self, keyword, session):
        """Hook: search *keyword*; return ``(param_list, error_code)``.

        ``error_code`` is one of the SEARCH_* codes.  The default reports a
        failed search, which makes query_company() back off cleanly.  (The
        historical default returned a bare list, which crashed the two-value
        unpacking in query_company() and was only rescued by its broad
        ``except`` — the observable outcome, CRAWL_UN_FINISH, is unchanged.)
        """
        return [], self.SEARCH_ERROR

    def get_detail_html_list(self, seed, session, param_list):
        """Hook: crawl detail pages for *param_list*; return how many
        succeeded. Default: 0 (nothing crawled)."""
        return 0

    def save_search_list(self, company, param_list):
        """Persist the raw search-result list for *company*.

        No-op when no search table is configured.  The crawl flag is reset
        to 0 so the downstream detail crawler will pick the record up.
        """
        if self.search_table is not None:
            data = dict(_id=company,
                        search_list=param_list,
                        website=self.province,
                        in_time=util.get_now_time())
            # Reset the crawl flag.
            data[self.crawl_flag] = 0
            self.source_db.insert_batch_data(self.search_table, [data])

    def get_encry_pripid(self, sub_url):
        """Ask the encryption service to encrypt a pripid sub-url.

        :param sub_url: script/sub-url sent to the service under ``script``
        :return: the service's response text on HTTP 200, else None
        """
        post_data = {"script": sub_url}

        try:
            # Context manager closes the session's connection pool
            # (the old code leaked the session).
            with requests.session() as session:
                session.headers['Content-Type'] = 'application/json'
                r = session.post(encry_conf['url'], json=post_data)
                if r.status_code == 200:
                    return r.text
        except Exception as e:
            self.log.exception(e)

        return None

    def get_captcha_geetest(self, url, input_selector, search, keyword, result, success=None,
                            proxy_type=1,
                            add_link=None,
                            click_first=None):
        """Ask the geetest captcha-cracking service to perform a search.

        :param url: page the service should open
        :param input_selector: CSS selector of the search input box
        :param search: CSS selector of the search button
        :param keyword: text typed into the search input
        :param result: CSS selector indicating that results have loaded
        :param success: optional selector indicating captcha success
        :param proxy_type: proxy pool to draw from; None disables proxying
        :param add_link: Chongqing-specific workaround, forwarded verbatim
        :param click_first: Chongqing-specific workaround, forwarded verbatim
        :return: the service's ``content`` payload on success, else None
        """
        post_data = {
            "url": url,
            "searchInputSelector": input_selector,
            "searchBtnSelector": search,
            "searchText": keyword,
            "resultIndicatorSelector": result,
        }
        if success is not None:
            post_data["successIndicatorSelector"] = success

        if proxy_type is not None:
            # [7:] assumes the proxy value starts with 'http://' (7 chars)
            # and strips that scheme prefix — TODO confirm against get_proxy().
            post_data["proxy"] = self.get_proxy(proxy_type=proxy_type)['http'][7:]

        if add_link is not None:
            post_data['__SPECIAL_HACK_FOR_CHONGQING__ADD_LINK__'] = add_link

        if click_first is not None:
            post_data['__SPECIAL_HACK_FOR_CHONGQING__CLICK_FIRST__'] = click_first

        try:
            # Context manager closes the session (the old code leaked it).
            with requests.session() as session:
                session.headers['Content-Type'] = 'application/json'
                r = session.post(captcha_geetest_conf['url'], json=post_data)
                if r.status_code != 200:
                    self.log.warn('验证码服务请求错误: status = {status} keyword = {key}'.format(
                        status=r.status_code, key=keyword))
                    return None

                json_data = util.json_loads(r.text)
                if json_data is None:
                    self.log.error('json数据转换失败...text = {text}'.format(text=r.text))
                    return None

                if not json_data.get('success', False):
                    self.log.warn('验证码识别失败: keyword = {key}'.format(key=keyword))
                    return None

                content = json_data.get('content', None)
                if content is None:
                    self.log.error('找不到相对应的content字段: key = {key} text = {text}'.format(key=keyword, text=r.text))
                    return None

                self.log.info('验证码破解成功: key = {key}'.format(key=keyword))
                return content
        except Exception as e:
            self.log.exception(e)
        self.log.error('验证码识别异常: key = {key}'.format(key=keyword))
        return None

    def query_company(self, item):
        """Run the full search-then-detail crawl for one task *item*.

        :param item: task document; its ``_id`` is the company name
        :return: one of the base worker's CRAWL_* status codes
        """
        try:
            company_name = item.get('_id', '')
            session = self.get_new_session(proxy_type=2)
            self.home_page(session)
            param_list, error_code = self.get_search_result_html(company_name, session)
            # The search itself failed -> retry later.
            if error_code == self.SEARCH_ERROR:
                return self.CRAWL_UN_FINISH

            # Search worked but found nothing.
            if error_code == self.SEARCH_NOTHING_FIND:
                return self.CRAWL_NOTHING_FIND

            # Persist the raw search list before drilling into details.
            self.save_search_list(company_name, param_list)

            # Finished only when every search entry yielded its detail page.
            list_length = len(param_list)
            result_length = self.get_detail_html_list(company_name, session, param_list)
            if result_length >= list_length:
                return self.CRAWL_FINISH
            return self.CRAWL_UN_FINISH
        except Exception as e:
            self.log.exception(e)
        return self.CRAWL_UN_FINISH

    def query_task(self, item):
        """Entry point invoked by the task framework for each queued item.

        :param item: task document; must be a dict with an ``_id`` company name
        :return: self.SUCCESS when the item needs no crawling, else self.FAIL
        """
        if not isinstance(item, dict):
            self.log.info('参数错误: item = {item}'.format(item=item))
            return self.FAIL

        # Skip items whose crawl flag says they are already done.
        if not self.check_crawl_flag(item, self.crawl_flag):
            return self.SUCCESS

        company_name = item.get('_id', '')
        self.log.info('开始抓取任务...province = {province} company = {company}'.format(
            province=self.province, company=company_name))
        status = self.query_company(item)

        # Record the crawl outcome back onto the item via the flag.
        self.set_crawl_flag(item, self.crawl_flag, status)
        self.log.info('完成抓取任务...province = {province} company = {company} status = {status}'.format(
            province=self.province, company=company_name, status=status))
        # NOTE(review): FAIL is returned even after a completed crawl; the
        # outcome is carried by the crawl flag set above. Confirm the
        # framework interprets this return as "task processed", not as an
        # error, before changing it.
        return self.FAIL