#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import base64
import re

from bs4 import BeautifulSoup

from common import hlog
from send_info import send_to_api
from data_handle.platform_handle import PlatformDataClean
from service.clean_error import save_html_log_for_urls


class LagouUrlClean(PlatformDataClean):
    """Clean a Lagou (拉勾) job-listing page and forward the extracted position URLs to the API.

    The incoming payload carries the page HTML base64-encoded; this class decodes it,
    pulls each position's detail-page link out of the listing, and ships the result
    via ``send_to_api``. On failure the raw HTML is persisted for later inspection.
    """

    # Extracts the job id from hrefs shaped like ".../jobs/<id>.html".
    # Raw string + escaped dot fix the original non-raw pattern ".*?jobs/(.*?).html.*?",
    # whose unescaped "." matched any character before "html".
    _JOB_ID_RE = re.compile(r"jobs/(.*?)\.html")

    def __init__(self, load_dict):
        """
        :param load_dict: message payload; expected keys include "htmlString"
            (base64-encoded page HTML) and "url", plus "platform" and "spiderUuid"
            used when logging failures — presumably set by the crawler, TODO confirm
            against the producer side.
        """
        super().__init__()
        self.load_dict = load_dict

    def data_clean(self):
        """Parse the listing page, collect per-position URLs and send them upstream.

        Any exception during parsing/sending is caught and the HTML is saved via
        ``save_html_log_for_urls`` (best-effort error capture, not re-raised).
        """
        hlog.enter_func("LagouUrlClean.data_clean()")

        position_url_list = []

        html = base64.b64decode(self.load_dict["htmlString"].encode("utf-8")).decode("utf-8")
        try:
            soup = BeautifulSoup(html, "lxml")
            tag_a_list = soup.select("#s_position_list > ul > li > div.list_item_top > div.position > div.p_top > a")
            for tag_a in tag_a_list:
                href = str(tag_a.get("href", ""))
                match = self._JOB_ID_RE.search(href)
                if match is None:
                    # Fix: the original called .group(1) unconditionally and raised
                    # AttributeError on an unexpected href, aborting the whole page.
                    # Skip the single bad anchor instead.
                    hlog.error("无法从href中解析srcPosId: " + href)
                    continue
                position_url_list.append({"srcPosId": str(match.group(1)), "platform": "拉勾", "url": tag_a["href"]})

            send_data = {"sourceUrl": self.load_dict["url"], "urls": position_url_list}

            hlog.info("数据清洗成功，准备发送到API接口")
            hlog.info("清洗成功的html的URL：" + self.load_dict["url"])

            send_to_api("send_url", "LaGou", send_data)

        except Exception as e:
            # Best-effort: persist the offending HTML so the failure can be replayed.
            try:
                platform = self.load_dict.get("platform")
                spiderUuid = self.load_dict.get("spiderUuid")
                url = self.load_dict.get("url")

                save_html_log_for_urls(html, platform, spiderUuid, url, e)
            except Exception as exc:
                # Fix: .get() may return None, and "str" + None raised TypeError
                # inside this handler; str() keeps the logging itself safe.
                hlog.error("保存拉勾清洗失败的html出错：" + str(exc))
                hlog.error("保存拉勾清洗出错的spiderUuid: " + str(self.load_dict.get("spiderUuid")))
                hlog.error("保存拉勾清洗出错的html的url: " + str(self.load_dict.get("url")))
        finally:
            hlog.exit_func("LagouUrlClean.data_clean()")


if __name__ == "__main__":
    # No standalone entry point: this module is presumably imported and driven by
    # the cleaning pipeline (via data_clean() on a message payload) — TODO confirm.
    pass
