#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 2019/11/11 下午2:14
# @Author  : yinxin
# @File    : util
# @Software: PyCharm

import base64
import inspect
import json
import logging
import os
import subprocess
import sys
import time
import uuid
from configparser import ConfigParser
from pathlib import PurePath

import requests

from common import api_url_config, error_data_config, max_send_date_retry, max_send_date_retry_time
from log import hlog
from utils.singleton import ListErrorCount, DetailErrorCount

# Silence requests' chatty INFO-level logging so it does not flood our log.
logging.getLogger("requests").setLevel(logging.WARNING)


def send_data(api_config, source_url, htmlString, platform):
    """
    POST the crawled HTML (base64-encoded) to the collection API, retrying
    up to ``max_send_date_retry`` times with a sleep between attempts.

    :param api_config: config object providing target_host/target_port/target_uri
    :param source_url: the URL that was crawled
    :param htmlString: the crawled page content as a string
    :param platform: platform identifier sent along with the payload
    :return: True if the API acknowledged with code "SUCCESS", False on empty
             input or when the retry budget is exhausted
    """
    func_name = inspect.stack()[0][3]
    hlog.enter_func(func_name)

    hlog.var("source_url", source_url)

    # Nothing to send for an empty crawl result.
    if "" == htmlString:
        hlog.var("htmlString", htmlString)

        hlog.exit_func(func_name)
        return False

    spider_uuid = uuid.uuid1()
    hlog.var("spider_uuid", spider_uuid)

    # The API expects the HTML body base64-encoded.
    encodedBytes = base64.b64encode(htmlString.encode("utf-8"))
    encodedStr = str(encodedBytes, "utf-8")

    data = {
        "url": source_url,
        "spiderUuid": str(spider_uuid),
        "platform": platform,
        "htmlString": encodedStr
    }

    headers = {
        "content-type": "application/json"
    }

    url = "http://%s:%s%s" % (
        api_config.target_host,
        api_config.target_port,
        api_config.target_uri
    )
    loop_time = 0
    while True:
        loop_time += 1
        if loop_time > max_send_date_retry:
            # Retry budget exhausted: give up.
            hlog.exit_func(func_name)
            return False

        response = None
        try:
            # timeout keeps a dead API endpoint from hanging the spider forever.
            response = requests.post(url=url, data=json.dumps(data), headers=headers, timeout=30)
        except requests.RequestException as e:
            # Narrowed from a bare `except: pass` that silently hid the cause.
            hlog.error(str(e))

        # `response is not None and response.ok` is equivalent to the truthiness
        # of a Response object, but explicit about both failure modes.
        if response is not None and response.ok:
            hlog.info("已成功发送到接口, 获取到返回状态: %s" % response.status_code)
            hlog.info("返回信息: %s" % response.text)
            try:
                response_data = json.loads(response.text)
            except json.decoder.JSONDecodeError:
                # A 2xx response with a non-JSON body counts as a failed attempt.
                response_data = {}
            if response_data.get("code") == "SUCCESS":
                break

        # Either the request failed or the API did not acknowledge: retry.
        hlog.error("爬取结果发送到接口第 %d次 失败" % loop_time)
        hlog.error("准备休眠%s后重试" % max_send_date_retry_time)
        time.sleep(max_send_date_retry_time)

    hlog.exit_func(func_name)
    return True


def save_html(kind, platform, source_url, htmlString):
    """
    Persist an HTML string that failed to reach the API so it can be
    replayed later from disk.

    :param kind: html category, "list" or "detail" (selects the dump directory)
    :param platform: platform name, used as the sub-directory
    :param source_url: the crawled URL; its base64 becomes the file name
    :param htmlString: crawled page content to write out
    :return: None
    """
    func_name = inspect.stack()[0][3]
    hlog.enter_func(func_name)
    # Map the html kind onto its configured error-dump directory.
    kind_conf_map = {
        "list": error_data_config.list,
        "detail": error_data_config.detail
    }

    path = PurePath(kind_conf_map[kind]) / platform

    # exist_ok avoids the check-then-create race of the old
    # `if not os.path.exists: os.makedirs` pair (and the redundant local
    # `import os` — it is already imported at module level).
    os.makedirs(path, exist_ok=True)

    file_name = str(base64.b64encode(source_url.encode("utf-8")), "utf-8")
    # base64 output may contain "/", which is illegal in a file name: use "#".
    file_name = file_name.replace("/", "#")
    with open(str(path / file_name), "w", encoding="utf-8") as f:
        f.write(htmlString)
    hlog.info("发送失败html保存完成, 路径: %s" % str(path / file_name))
    hlog.exit_func(func_name)

def crawl_list_url(spider_path, url):
    """
    Run the node.js list spider against *url* and parse its JSON output.

    :param spider_path: path to the node.js spider script
    :param url: the URL to crawl
    :return: dict with "html" (page content) and "next" (next page URL);
             both empty strings when the spider fails or emits bad JSON
    """
    func_name = inspect.stack()[0][3]
    hlog.enter_func(func_name)

    hlog.var("spider_path", spider_path)
    hlog.var("url", url)

    result = {
        "html": "",
        "next": ""
    }

    # Argument-list form (shell=False) replaces os.popen's interpolated shell
    # string: a URL containing shell metacharacters could otherwise inject
    # arbitrary commands.
    completed = subprocess.run(
        ["node", "--no-warnings", str(spider_path), url],
        capture_output=True, text=True
    )
    output = completed.stdout

    try:
        jsonString = json.loads(output)
    except json.decoder.JSONDecodeError as e:

        hlog.error("nodejs异常, 未按约定返回json字符串")
        hlog.error(str(e))

        hlog.exit_func(func_name)
        return result

    hlog.info("爬取完成，返回状态: %s\n返回message%s" % (jsonString["code"], jsonString["message"]))

    if "success" == jsonString["code"]:
        result = {
            "html": jsonString["data"]["htmlString"],
            "next": jsonString["data"]["nextUrl"]
        }

        # Presumably registers a successful run with the error counter
        # (see utils.singleton) — confirm against ListErrorCount.
        ListErrorCount.get_count()

    elif jsonString['code'] == "error":
        hlog.debug("爬虫爬取有误")

        error_count = ListErrorCount.get_count("error")
        stop_spider("list", error_count)

    hlog.exit_func(func_name)
    return result


def crawl_detail_url(spider_path, url):
    """
    Run the node.js detail spider against *url* and parse its JSON output.

    :param spider_path: path to the node.js spider script
    :param url: the URL to crawl
    :return: the crawled html string from the spider's "data" field,
             or "" when the spider fails or emits bad JSON
    """
    func_name = inspect.stack()[0][3]
    hlog.enter_func(func_name)

    hlog.var("spider_path", spider_path)
    hlog.var("url", url)

    result = ""

    # Argument-list form (shell=False) replaces os.popen's interpolated shell
    # string: a URL containing shell metacharacters could otherwise inject
    # arbitrary commands.
    completed = subprocess.run(
        ["node", "--no-warnings", str(spider_path), url],
        capture_output=True, text=True
    )
    output = completed.stdout

    try:
        jsonString = json.loads(output)
    except json.decoder.JSONDecodeError as e:

        hlog.error("nodejs异常, 未按约定返回json字符串")
        hlog.error(str(e))

        hlog.exit_func(func_name)
        return result

    hlog.info("爬取完成，返回状态: %s\n返回message%s" % (jsonString["code"], jsonString["message"]))

    if "success" == jsonString["code"]:
        result = jsonString["data"]
        # Presumably registers a successful run with the error counter
        # (see utils.singleton) — confirm against DetailErrorCount.
        DetailErrorCount.get_count()
    elif jsonString['code'] == "error":
        hlog.warning("爬虫爬取有误")
        hlog.warning(jsonString['message'])

        error_count = DetailErrorCount.get_count("error")
        stop_spider("detail", error_count)

    hlog.exit_func(func_name)
    return result


def get_start_url(api_config):
    """
    Fetch the start URL and crawl quota from the configured source API.

    :param api_config: config object providing source_host/source_port/source_uri
    :return: tuple ``(url, num)`` — the start URL and the number of pages to
             crawl; ``("", 0)`` on any network or protocol failure
    """
    func_name = inspect.stack()[0][3]
    hlog.enter_func(func_name)

    headers = {
        "content-type": "application/json"
    }

    url = "http://%s:%s%s" % (
        api_config.source_host,
        api_config.source_port,
        api_config.source_uri
    )
    hlog.var("url", url)
    try:
        # timeout keeps a dead endpoint from blocking the spider indefinitely;
        # a connection error now falls through to the ("", 0) return instead
        # of propagating out of this function.
        response = requests.get(url=url, headers=headers, timeout=30)
    except requests.RequestException as e:
        hlog.error(str(e))
        response = None

    if response is not None and 200 == response.status_code:
        try:
            response_json = json.loads(response.text)
        except json.decoder.JSONDecodeError:
            # Non-JSON body counts as a failure.
            response_json = {}
        if "SUCCESS" == response_json.get("code"):
            hlog.info("获取起始url成功")
            # BUGFIX: exit_func was missing on this success path.
            hlog.exit_func(func_name)
            return response_json["result"]["url"], response_json["result"]["maxSize"]

    hlog.debug("获取起始url失败，请检查网络")
    hlog.exit_func(func_name)
    return "", 0


def get_platform(url):
    """
    Extract the main domain token from a URL, e.g.
    "https://www.lagou.com/jobs" -> "lagou".

    :param url: the URL to parse
    :return: the second dot-separated label of the host, or "" on failure
    """
    func_name = inspect.stack()[0][3]
    hlog.enter_func(func_name)
    hlog.var("url", url)
    try:
        # "scheme://host/..." -> the host is the third "/"-separated field.
        domain = url.split("/")[2]
        hlog.var("domain", domain)
        # "www.lagou.com" -> "lagou"; assumes a host of the form a.b.c.
        platform = domain.split(".")[1]
        hlog.var("platform", platform)
    except (IndexError, AttributeError) as e:
        # Narrowed from BaseException; routed through the logger instead of
        # the stray print() the original used.
        hlog.debug(str(e))
        hlog.debug("获取网站域名主体失败")
        # BUGFIX: exit_func was missing on this failure path.
        hlog.exit_func(func_name)
        return ""

    hlog.info("获取网站域名主体成功")
    hlog.exit_func(func_name)
    return platform


def stop_spider(spider_type, error_count):
    """
    Stop this spider process once the platform appears to have blocked it.

    When *error_count* reaches 5, tell the backend API to reset the state of
    the URLs currently being crawled (retrying up to ``max_send_date_retry``
    times), then terminate the process via ``sys.exit(0)``.

    :param spider_type: "list" or "detail"; sent to the API as a query param
    :param error_count: current consecutive-error count
    :return: False if the API could not be reached within the retry budget;
             None when the threshold has not been hit; does not return at all
             (exits the process) after a successful state reset
    """
    func_name = inspect.stack()[0][3]
    hlog.enter_func(func_name)
    if error_count >= 5:
        hlog.warning("爬虫被平台识别，现在停止爬虫，并修改数据库中正在爬的url状态")

        config = ConfigParser()
        config.read(api_url_config.absolute(), encoding='UTF-8')
        api_url = config['api_url_config']["send_spider_modify"]

        url = str(api_url) + "?spider=" + spider_type

        headers = {
            "Content-Type": "application/json"
        }

        # Ask the API to reset the url state in the database.
        hlog.info("通过接口修改数据库中的url状态")
        loop_time = 0
        while True:
            loop_time += 1
            if loop_time > max_send_date_retry:
                # Retry budget exhausted: report failure to the caller.
                hlog.exit_func(func_name)
                return False

            response = None
            try:
                # timeout keeps a dead endpoint from hanging this loop forever.
                response = requests.get(url=url, headers=headers, timeout=30)
            except requests.RequestException as e:
                # Narrowed from a bare `except: pass` that silently hid the cause.
                hlog.error(str(e))

            # Equivalent to the old `if response:` truthiness, but explicit.
            if response is not None and response.ok:
                hlog.info("已成功发送到接口, 获取到返回状态: %s" % response.status_code)
                hlog.info("返回信息: %s" % response.text)
                try:
                    response_data = json.loads(response.text)
                except json.decoder.JSONDecodeError:
                    # Non-JSON body counts as a failed attempt.
                    response_data = {}
                if response_data.get("code") == "SUCCESS":
                    break

            # Either the request failed or the API did not acknowledge: retry.
            hlog.error("修改数据库中的url状态第 %d次 失败" % loop_time)
            hlog.error("准备休眠%s后重试" % max_send_date_retry_time)
            time.sleep(max_send_date_retry_time)

        hlog.info("准备退出" + spider_type + "进程")

        hlog.exit_func(func_name)

        sys.exit(0)

    hlog.exit_func(func_name)


if __name__ == '__main__':
    # Manual smoke test: push a dummy payload through send_data and show
    # whether the API acknowledged it.
    from common import detail_api_config

    sent_ok = send_data(detail_api_config, "saodh", "osajdio", "asdasd")
    print(sent_ok)
