#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import base64
import datetime
import inspect
import re

from bs4 import BeautifulSoup

from common import hlog
from data_handle.util import classification_level
from send_info import send_to_api
from data_handle.platform_handle import PlatformDataClean
from service.clean_error import save_html_log


class LagouDetailClean(PlatformDataClean):
    """Cleaner for Lagou (拉勾) job-detail pages.

    Decodes the base64-encoded HTML carried in ``load_dict``, extracts
    position / salary / company fields with BeautifulSoup plus regexes over
    the raw markup, assembles a flat record and forwards it to the API via
    ``send_to_api``.  Any parse failure is caught at the top level and the
    offending HTML is archived with ``save_html_log`` for later inspection.
    """

    def __init__(self, load_dict):
        # load_dict is the raw crawl message; keys used below:
        # "htmlString" (base64-encoded page), "url", "platform", "spiderUuid".
        super().__init__()
        self.load_dict = load_dict

    def data_clean(self):
        """Parse one job-detail page and send the cleaned record to the API.

        Returns nothing; on any extraction error the raw HTML is persisted
        via ``save_html_log`` instead of propagating the exception.
        """
        func_name = inspect.stack()[0][3]
        hlog.enter_func(func_name)
        hlog.info("开始清洗数据")

        # NOTE(review): decoding happens before the try block, so a missing
        # "htmlString" key or invalid base64 raises uncaught — confirm the
        # upstream producer always supplies a valid payload.
        html = base64.b64decode(self.load_dict["htmlString"].encode("utf-8")).decode("utf-8")
        try:
            soup = BeautifulSoup(html, "lxml")
            url = self.load_dict.get("url")
            hlog.var('url', url)

            # Position title; empty means the page could not be parsed
            # (likely an anti-crawler block page), so bail out early.
            position = self.get_position(soup)

            if not position:
                hlog.exit_func(func_name)
                return

            # Salary, city, experience, education, job kind (one spans row)
            salary, salary_min, salary_max, several, city, exp, exp_min, exp_max, edu, kind, edu_level = self.\
                get_salary_city_exp_edu_kind(soup)

            # Position tags
            tag_list = self.get_tag_list(soup)

            # Position perks ("职位诱惑")
            advantage = self.get_advantage(soup)

            # Job description
            pos_desc = self.get_pos_desc(soup)

            # Recruiter name and title
            hr_name, hr_position = self.get_hr(soup)

            # Company name
            company_name = self.get_company_name(soup)

            # Work location
            location = self.get_location(soup)

            # Company info (stage, scale, homepage, publish time, nature)
            stage, scale, scale_left, scale_right, company_main_page, publish_time, company_nature = self.\
                get_company_info(soup)

            # Source platform name
            src_name = self.load_dict.get("platform")

            # Strip the "?show=..." tracking suffix from the URL; raises
            # AttributeError (caught below) when "show" is absent.
            src_url = str(re.search("(.*?)show.*?", self.load_dict.get("url")).group(1).strip("?"))

            # Numeric position id embedded in ".../jobs/<id>.html"
            src_pos_id = str(re.search(".*?jobs/(.*?).html.*?", src_url).group(1))

            # Crawl-run identifier, passed through unchanged
            spider_uuid = self.load_dict.get("spiderUuid")

            # Assemble the outgoing record (camelCase keys per API contract)
            info = dict()
            info['srcName'] = src_name
            info['srcUrl'] = src_url
            info['srcPosId'] = src_pos_id
            info['position'] = position
            info['salary'] = salary
            info['salaryMin'] = salary_min
            info['salaryMax'] = salary_max
            info['several'] = several
            info['city'] = city
            info['address'] = location
            info['exp'] = exp
            info['expMin'] = exp_min
            info['expMax'] = exp_max
            info['edu'] = edu
            info['eduLevel'] = edu_level
            info['tagList'] = tag_list
            info['advantage'] = advantage
            info['posDesc'] = pos_desc
            info['hrName'] = hr_name
            info['hrPosition'] = hr_position
            info['companyName'] = company_name
            info['companyNature'] = company_nature
            info['stage'] = stage
            info['scale'] = scale
            info['scaleLeft'] = scale_left
            info['scaleRight'] = scale_right
            info['companyMainPage'] = company_main_page
            info['publishTime'] = publish_time
            info['spiderUuid'] = spider_uuid
            info['kind'] = kind

            send_data = info

            hlog.info("数据清洗成功，准备发送到API接口")
            hlog.info("清洗成功的html的URL：" + src_url)

            send_to_api("send_pos_detail", "LaGou", send_data)
        except Exception as e:
            # Intentionally broad: any parse failure (unexpected markup,
            # missing node, regex miss) lands here and the raw HTML is
            # archived so the breakage can be diagnosed offline.
            try:
                platform = self.load_dict.get("platform")
                spider_uuid = self.load_dict.get("spiderUuid")
                url = self.load_dict.get("url")

                save_html_log(html, platform, spider_uuid, url, e)
            except Exception as exc:
                # NOTE(review): these concatenations raise TypeError if
                # "spiderUuid"/"url" are missing (None) — confirm keys.
                hlog.error("保存拉勾清洗失败的html出错：" + str(exc))
                hlog.error("保存拉勾清洗出错的spiderUuid: " + self.load_dict.get("spiderUuid"))
                hlog.error("保存拉勾清洗出错的html的url: " + self.load_dict.get("url"))
        finally:
            hlog.exit_func(func_name)

    @staticmethod
    def get_position(soup):
        """Return the job title from '.job-name .name', or '' when absent.

        A missing node is treated as a probable anti-crawler page: the full
        markup is logged for manual inspection.
        """
        func_name = inspect.stack()[0][3]
        hlog.enter_func(func_name)

        position_list = soup.select(".job-name .name")
        position = ""

        if position_list:
            # Crude inner-text extraction: first '>text<' span of the tag's
            # string form.
            is_position = re.search(".*?>(.*?)<.*?", str(position_list[0]))

            if is_position:
                position = is_position.group(1).strip()
        else:
            hlog.error(str(soup))
            hlog.error('拉勾爬虫：职位详情分析失败，是否被反爬机制识别了？请人工确认')

        hlog.var('position', position)
        hlog.exit_func(func_name)
        return position

    @staticmethod
    def get_salary_city_exp_edu_kind(soup):
        """Parse the '.job_request span' row into an 11-field tuple.

        Spans are read positionally: [0] salary, [1] city, [2] experience,
        [3] education, [4] job kind.  An IndexError on a short row is caught
        by data_clean's broad handler.

        Returns:
            (salary, salary_min, salary_max, several, city, exp, exp_min,
             exp_max, edu, kind, edu_level)
        """
        func_name = inspect.stack()[0][3]
        hlog.enter_func(func_name)

        money_location_exp_edu = soup.select(".job_request span")
        is_salary = re.search(".*?>(.*?)</.*?", str(money_location_exp_edu[0]))
        if is_salary:
            salary = is_salary.group(1).replace("/", "").strip()
        else:
            salary = ""

        salary_min, salary_max = 0, 0
        # Presumably salary payments per year (12 months) — TODO confirm
        # downstream meaning of "several".
        several = 12
        if salary:
            salary_find = re.findall(r"\d+", salary)
            is_day = re.findall("[天]", salary)
            if len(salary_find) == 1:
                salary_min = int(salary_find[0])
            elif len(salary_find) == 2:
                if is_day:
                    # Daily wage in yuan: ×22 workdays, //1000 to get
                    # thousands-per-month — presumably matching the "Nk"
                    # unit of monthly salaries; TODO confirm.
                    salary_min = (int(salary_find[0]) * 22) // 1000
                    salary_max = (int(salary_find[1]) * 22) // 1000
                else:
                    salary_min = int(salary_find[0])
                    salary_max = int(salary_find[1])

        is_city = re.search(".*?>(.*?)</.*?", str(money_location_exp_edu[1]))
        if is_city:
            city = is_city.group(1).replace("/", "").strip()
        else:
            city = ""

        is_exp = re.search(".*?>(.*?)</.*?", str(money_location_exp_edu[2]))
        if is_exp:
            exp = is_exp.group(1).replace("/", "").strip()
        else:
            exp = ""

        exp_min, exp_max = 0, 0
        if exp:
            exp_find = re.findall(r"\d+", exp)
            if len(exp_find) == 1:
                # Single number: >=5 reads as "N+ years" (lower bound),
                # otherwise as "up to N years" (upper bound).
                if int(exp_find[0]) >= 5:
                    exp_min = int(exp_find[0])
                else:
                    exp_max = int(exp_find[0])
            elif len(exp_find) == 2:
                exp_min = int(exp_find[0])
                exp_max = int(exp_find[1])

        is_edu = re.search(".*?>(.*?)</.*?", str(money_location_exp_edu[3]))
        if is_edu:
            edu = is_edu.group(1).replace("/", "").strip()
        else:
            edu = ""

        is_kind = re.search(".*?>(.*?)</.*?", str(money_location_exp_edu[4]))
        if is_kind:
            kind = is_kind.group(1).replace("/", "").strip()
        else:
            # NOTE(review): kind defaults to None while the other fields
            # default to "" — confirm the API accepts null here.
            kind = None

        # Map the education string to the project's numeric level code.
        edu_level = classification_level(edu)

        hlog.exit_func(func_name)

        return salary, salary_min, salary_max, several, city, exp, exp_min, exp_max, edu, kind, edu_level

    @staticmethod
    def get_tag_list(soup):
        """Return the position tags ('.position-label li') as a list of strings."""
        func_name = inspect.stack()[0][3]
        hlog.enter_func(func_name)

        tags = soup.select(".position-label li")
        tag_list = []
        for tag in tags:
            is_t = re.search(".*?>(.*?)</.*?", str(tag))
            if is_t:
                t = is_t.group(1).strip()
                tag_list.append(t)

        hlog.exit_func(func_name)

        return tag_list

    @staticmethod
    def get_advantage(soup):
        """Return the perk list parsed from the first '.job-advantage p'.

        Raises IndexError (caught by data_clean) when the node is missing.
        """
        func_name = inspect.stack()[0][3]
        hlog.enter_func(func_name)

        advantage = []
        is_advantage_str = re.search(".*?>(.*?)</.*?", str(soup.select(".job-advantage p")[0]))
        if is_advantage_str:
            advantagestr = is_advantage_str.group(1)
        else:
            # Default to "等" so the replace below yields an empty string.
            advantagestr = "等"
        # Drop the trailing "等" ("etc.") then split into word tokens.
        advantage_str = advantagestr.replace("等", "")
        adv_pattern = re.compile(r"\w+")
        adv_result = re.findall(adv_pattern, advantage_str)
        if adv_result:
            for adv in adv_result:
                advantage.append(adv)

        hlog.exit_func(func_name)

        return advantage

    @staticmethod
    def get_pos_desc(soup):
        """Return the job description as one newline-joined string.

        Falls back from '.job-detail p' to the bare '.job-detail' container
        when the page has no paragraph tags.
        """
        func_name = inspect.stack()[0][3]
        hlog.enter_func(func_name)

        pos_describ_s = soup.select(".job-detail p")
        if not pos_describ_s:
            pos_describ_s = soup.select(".job-detail")

        pos_desc = ""
        if pos_describ_s:
            pos_desc_list = []
            pos_desc_list_rs = []
            # NOTE(review): the loop variable shadows the outer pos_desc;
            # harmless only because pos_desc is reassigned below.
            for pos_desc in pos_describ_s:
                is_p_d = re.search(".*?>(.*?)</.*?", str(pos_desc), re.S)
                if is_p_d:
                    p_d = is_p_d.group(1)
                else:
                    p_d = ""
                # Strip <br/> markers, non-breaking spaces and newlines.
                p_d_str = p_d.replace("<br/>", "").replace("\xa0", "").replace("\n", "").strip()
                pos_desc_list.append(p_d_str)

            for pos_desc_list_str in pos_desc_list:
                pos_desc_list_rs.append(pos_desc_list_str + "\n")
            pos_desc_str = "".join(pos_desc_list_rs)
            pos_desc = pos_desc_str

        hlog.exit_func(func_name)

        return pos_desc

    @staticmethod
    def get_hr(soup):
        """Return (hr_name, hr_position) read from hidden '.hr_name' /
        '.hr_position' input values.

        Raises IndexError/KeyError (caught by data_clean) when absent.
        """
        func_name = inspect.stack()[0][3]
        hlog.enter_func(func_name)

        hr_name = soup.select(".hr_name")[0]["value"]
        hr_position = soup.select(".hr_position")[0]["value"]

        hlog.exit_func(func_name)

        return hr_name, hr_position

    @staticmethod
    def get_company_name(soup):
        """Return the company name with the trailing '招聘' suffix removed."""
        func_name = inspect.stack()[0][3]
        hlog.enter_func(func_name)

        is_company = re.search(".*?>(.*?)</.*?", str(soup.select(".job-name .company")[0]))
        if is_company:
            company = is_company.group(1)
        else:
            # Default to "招聘" so the replace below yields an empty name.
            company = "招聘"
        company_name = company.replace("招聘", "").strip()

        hlog.exit_func(func_name)

        return company_name

    @staticmethod
    def get_location(soup):
        """Return the work address joined from '.work_addr' text fragments.

        The last two fragments are dropped — presumably trailing map-link
        text ("查看地图"); TODO confirm against a live page.
        """
        func_name = inspect.stack()[0][3]
        hlog.enter_func(func_name)

        work_place_list = re.findall(".*?>(.*?)<.*?", str(soup.select(".work_addr")[0]), re.S)
        work_place_str = "".join(work_place_list[0:-2])
        location = work_place_str.replace("\n", "").replace(" ", "")

        hlog.exit_func(func_name)

        return location

    @staticmethod
    def get_company_info(soup):
        """Parse the '.c_feature' sidebar, keyed by the icon class names.

        Returns:
            (stage, scale, scale_left, scale_right, company_main_page,
             publish_time, company_nature)
        """
        func_name = inspect.stack()[0][3]
        hlog.enter_func(func_name)

        # Company nature/ownership (fourSquare icon), split into word tokens
        company_nature = []
        is_company_nature_str = re.search(".*?icon-glyph-fourSquare.*?name\">(.*?)</.*?",
                                          str(soup.select(".c_feature")))
        if is_company_nature_str:
            company_nature_str = is_company_nature_str.group(1)
        else:
            company_nature_str = ""
        nature_pattern = re.compile(r"\w+")
        nature_result = re.findall(nature_pattern, company_nature_str.strip())
        if nature_result:
            for nature in nature_result:
                company_nature.append(nature.strip())

        # Funding/development stage (trend icon)
        is_stage = re.search(".*?icon-glyph-trend.*?name\">(.*?)</.*?", str(soup.select(".c_feature")))
        if is_stage:
            stage = is_stage.group(1)
        else:
            stage = None

        # Headcount scale (figure icon)
        is_scale = re.search(".*?icon-glyph-figure.*?name\">(.*?)</.*?", str(soup.select(".c_feature")))
        if is_scale:
            scale = is_scale.group(1).strip()
        else:
            scale = None

        scale_left, scale_right = 0, 0
        if scale:
            # "少" => "fewer than N" (upper bound only);
            # "上" => "N or more" (lower bound only); otherwise "N-M".
            less = re.search(".*?少.*?", scale)
            lots = re.search(".*?上.*?", scale)
            scale_find = re.findall(r"\d+", scale)
            if less:
                scale_right = int(scale_find[0])
                scale_left = 0
            elif lots:
                scale_left = int(scale_find[0])
                scale_right = 0
            else:
                scale_left = int(scale_find[0])
                scale_right = int(scale_find[1])

        # Company homepage (home icon): two-step match, inner text of the
        # 'c_feature_name' span inside the anchor.
        page = ""
        page_str = re.search(".*?icon-glyph-home\">(.*?)</a.*?", str(soup.select(".c_feature")), re.S)
        if page_str:
            page = re.search(".*?c_feature_name\">(.*?)</.*?", page_str.group(1), re.S)
        company_main_page = page.group(1) if page else None

        # Publish time: keep the part before the first space, then
        # normalize relative forms ("2天前", "14:30") via time_handle.
        is_time = re.search(".*?>(.*?)</.*?", str(soup.select(".publish_time")[0]))
        if is_time:
            this_time = is_time.group(1)
            publish_time = this_time.split(" ")[0].strip()
            publish_time = time_handle(publish_time)
        else:
            publish_time = None

        hlog.exit_func(func_name)

        return stage, scale, scale_left, scale_right, company_main_page, publish_time, company_nature


def time_handle(publish_time):
    """Normalize a Lagou publish-time string to a "YYYY-MM-DD" date string.

    Input forms observed on the site:
      * "14:30"   (contains ":")  -> published today        -> today's date
      * "2天前"   (contains "天") -> published N days ago   -> today - N days
      * "2020-01-15" (contains "-") -> already a date       -> returned as-is
      * anything else                                      -> today's date

    Args:
        publish_time: raw time fragment scraped from the page.

    Returns:
        A "YYYY-MM-DD" string.
    """
    func_name = inspect.stack()[0][3]
    hlog.enter_func(func_name)
    hlog.var("publish_time", publish_time)

    try:
        now = datetime.datetime.now()

        if re.search(":", publish_time):
            # An "HH:MM" timestamp means it was published today.
            return now.strftime("%Y-%m-%d")

        if re.search("天", publish_time):
            # "N天前": subtract N days from today.
            days_ago = re.search(r"\d+", publish_time).group()
            delta = datetime.timedelta(days=int(days_ago))
            return (now - delta).strftime("%Y-%m-%d")

        if re.search("-", publish_time):
            # Already an absolute "YYYY-MM-DD" date: keep it.  (Bug fix:
            # the original computed this match into an unused variable and
            # fell through, silently replacing historical dates with today.)
            return publish_time

        return now.strftime("%Y-%m-%d")
    finally:
        # Bug fix: the ":" and "天" branches previously returned before
        # hlog.exit_func ran; finally guarantees the enter/exit pairing.
        hlog.exit_func(func_name)


if __name__ == "__main__":
    # Ad-hoc debug entry point: replay a locally saved page through the
    # cleaner when chasing parsing bugs.
    with open("this.html", "r", encoding="utf-8") as page:
        saved_html = page.read()

    payload = {
        "htmlString": saved_html,
        "url": "https://www.lagou.com/jobs/374241.html?show=679b4d61ff4a438c931008e4ae29b6c0",
        "platform": "拉勾",
    }
    LagouDetailClean(payload).data_clean()
