#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import re
import time
from configparser import ConfigParser
from pathlib import Path

from common import hlog, common_abs_path


def save_html_log_for_urls(html, platform, spiderUuid, url, e):
    """Persist the HTML of a page that failed cleaning so it can be inspected later.

    The file is written under ``failed_html/<platform>/<YYYY_MM_DD>/`` with a
    name derived from the URL path (the part after ".com/") plus a timestamp.
    Platforms not listed in the config are silently ignored.

    Args:
        html: raw HTML text that failed to be cleaned.
        platform: platform identifier, compared against the values in the
            ``[platform]`` section of the config file at ``common_abs_path``.
        spiderUuid: identifier of the spider run, logged for traceability.
        url: source URL of the html; its path names the saved file.
        e: the exception (or reason) for the failure, logged via ``str(e)``.
    """
    hlog.enter_func("save_html_log_for_urls")

    time_ticks = time.strftime("%Y%m%d%H%M%S", time.localtime())
    now = time.strftime("%Y_%m_%d", time.localtime())

    platform_config = ConfigParser()
    platform_config.read(common_abs_path.absolute(), encoding='UTF-8')

    # Map the platform to a regex that extracts the URL path and to a dated
    # output directory; raw strings keep the regexes free of escape surprises.
    if platform == platform_config["platform"]["LaGou"]:
        pattern = r".*?com/(.*)"
        html_path = Path("failed_html") / "lagou" / now
    elif platform == platform_config["platform"]["ZhiPin"]:
        pattern = r".*?com/(.*)"
        html_path = Path("failed_html") / "zhipin" / now
    else:
        # Unknown platform: nothing to save.
        pattern = None
        html_path = None

    if pattern is not None:
        match = re.search(pattern, url)
        if match is None:
            # URL does not contain ".com/" — the original code crashed here
            # with AttributeError on None.group(); log and bail out instead.
            hlog.error("无法从url中提取文件名: " + url)
            hlog.exit_func("save_html_log_for_urls")
            return

        src_url_pos_sign = match.group(1).strip().replace("/", "_").replace("?", "_")

        # mkdir(parents=True, exist_ok=True) replaces the check-then-create
        # race of os.path.exists + os.makedirs.
        html_path.mkdir(parents=True, exist_ok=True)

        # Write the failing html to a timestamped file (the `with` block
        # closes it; no explicit close() needed).
        file_name = src_url_pos_sign + "_" + time_ticks + ".html"
        file_url_name = html_path / file_name
        with open(file_url_name, "w", encoding="utf-8") as file:
            file.write(html)

        hlog.error("html清洗失败啦")
        hlog.error("清洗失败的spiderUuid: " + spiderUuid)
        hlog.error("清洗失败的url: " + url)
        # BUG FIX: file_url_name is a Path, and Path.replace() is a file
        # rename taking a single target — the original two-argument call
        # raised TypeError. Convert to str before the textual replace.
        hlog.error("将清洗失败的html文件保存到：" + str(file_url_name).replace("../", ""))
        hlog.error("出错原因，详情：" + str(e))
        hlog.error("time：" + time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))

    hlog.exit_func("save_html_log_for_urls")
